Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoSign in
Socket

chainbench

Package Overview
Dependencies
Maintainers
1
Versions
37
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

chainbench - pypi Package Compare versions

Comparing version
0.8.0
to
0.8.2
+28
chainbench/shapes/spike.py
from locust import LoadTestShape
class SpikeLoadShape(LoadTestShape):
    """Load shape with a single mid-test spike.

    Runs at 10% of the configured users for the first 40% of the test
    duration, spikes to 100% of users for the next 20%, then drops back
    to 10% until the test ends. Returns None once the run time is up,
    which tells locust to stop the test.
    """

    use_common_options = True

    def tick(self):
        elapsed = self.get_run_time()
        opts = self.runner.environment.parsed_options
        total = opts.run_time

        # Split the run into tenths; the spike occupies tenths 4-6.
        tenth = round(total / 10)
        spike_start = tenth * 4
        spike_end = tenth * 6

        if elapsed >= total:
            # Test duration exhausted — signal locust to stop.
            return None
        if spike_start <= elapsed < spike_end:
            # Spike window: full user count.
            return opts.num_users, opts.spawn_rate
        # Ramp-in and cool-down windows: 10% of users.
        return round(opts.num_users / 10), opts.spawn_rate
import math
from locust import LoadTestShape
class StepLoadShape(LoadTestShape):
    """Stepped load shape.

    The number of steps is the total user count divided by the spawn
    rate; the total run time is divided equally among the steps. Each
    step raises the target user count by one spawn-rate increment until
    the full user count is reached. Returns None once the run time is
    up, which tells locust to stop the test.
    """

    use_common_options = True

    def tick(self):
        run_time = self.get_run_time()
        total_run_time = self.runner.environment.parsed_options.run_time
        if run_time < total_run_time:
            step = self.runner.environment.parsed_options.spawn_rate
            users = self.runner.environment.parsed_options.num_users
            # Clamp to at least one step: round(users / step) is 0 when
            # num_users is small relative to spawn_rate (e.g. 1 user,
            # spawn rate 3), which previously raised ZeroDivisionError
            # on the step_time computation below.
            no_of_steps = max(1, round(users / step))
            step_time = total_run_time / no_of_steps
            # Never exceed the configured total user count on the final step.
            user_count = min(step * math.ceil(run_time / step_time), users)
            return user_count, step
        return None
+25
-5

@@ -1,2 +0,1 @@

import logging
import os

@@ -37,5 +36,3 @@ import shlex

logger = logging.getLogger(__name__)
@click.group(

@@ -112,2 +109,8 @@ help="Tool for flexible blockchain infrastructure benchmarking.",

)
@click.option(
"-s",
"--shape",
default=None,
help="Shape of load pattern",
)
@click.option("-H", "--host", default=MASTER_HOST, help="Host to run on", show_default=True)

@@ -193,2 +196,3 @@ @click.option("-P", "--port", default=MASTER_PORT, help="Port to run on", show_default=True)

profile_dir: Path | None,
shape: str | None,
host: str,

@@ -254,3 +258,3 @@ port: int,

if test_by_directory:
from locust.argument_parser import find_locustfiles
from locust.argument_parser import parse_locustfile_paths
from locust.util.load_locustfile import load_locustfile

@@ -260,3 +264,3 @@

test_data_types = set()
for locustfile in find_locustfiles([profile_path.__str__()], True):
for locustfile in parse_locustfile_paths([profile_path.__str__()]):
_, _user_classes, _ = load_locustfile(locustfile)

@@ -294,2 +298,9 @@ for key, value in _user_classes.items():

if shape is not None:
shapes_dir = get_base_path(__file__) / "shapes"
shape_path = get_profile_path(shapes_dir, shape)
click.echo(f"Using load shape: {shape}")
else:
shape_path = None
results_dir = Path(results_dir).resolve()

@@ -340,2 +351,3 @@ results_path = ensure_results_dir(profile=profile, parent_dir=results_dir, run_id=run_id)

exclude_tags=custom_exclude_tags,
shape_path=shape_path,
timescale=timescale,

@@ -491,2 +503,10 @@ pg_host=pg_host,

@_list.command(
help="Lists all available load shapes.",
)
def shapes() -> None:
for shape in get_profiles(get_base_path(__file__) / "shapes"):
click.echo(shape)
@_list.command(
help="Lists all available methods.",

@@ -493,0 +513,0 @@ )

+1
-1

@@ -10,3 +10,3 @@ """

class AvalancheArchiveProfile(EvmUser):
class ArbitrumArchiveProfile(EvmUser):
wait_time = constant_pacing(1)

@@ -13,0 +13,0 @@ rpc_calls = {

@@ -32,3 +32,2 @@ """

EvmUser.debug_trace_block_by_hash: 3,
EvmUser.net_listening: 2,
EvmUser.net_version: 2,

@@ -35,0 +34,0 @@ EvmUser.eth_syncing: 1,

@@ -26,2 +26,4 @@ """

EvmUser.debug_get_raw_receipts: 1,
EvmUser.debug_storage_range_at: 1,
EvmUser.debug_trace_block: 1,
}

@@ -28,0 +30,0 @@

@@ -156,4 +156,2 @@ import logging

data_dict = data
logger.debug(f"data_dict type {type(data_dict)}")
logger.debug("Data: %s", data_dict)

@@ -160,0 +158,0 @@ slot = data_dict["block_number"]

@@ -367,2 +367,29 @@ import logging

},
534352: {
"name": "scroll-mainnet",
"start_block": 1,
"contract_addresses": [
"0xf55BEC9cafDbE8730f096Aa55dad6D22d44099Df",
"0x06eFdBFf2a14a7c8E15944D1F4A48F9F95F663A4",
"0xf610A9dfB7C89644979b4A0f27063E9e7d7Cda32",
"0x3C1BCa5a656e69edCD0D4E36BEbb3FcDAcA60Cf1",
"0xcA77eB3fEFe3725Dc33bccB54eDEFc3D9f764f97",
"0x434cdA25E8a2CA5D9c1C449a8Cb6bCbF719233E8",
"0x01f0a31698C4d065659b9bdC21B3610292a1c506",
"0x79379C0E09a41d7978f883a56246290eE9a8c4d3",
"0x53878B874283351D26d206FA512aEcE1Bef6C0dD",
"0x1b896893dfc86bb67Cf57767298b9073D2c1bA2c",
],
},
534351: {
"name": "scroll-sepolia-testnet",
"start_block": 1,
"contract_addresses": [
"0x5300000000000000000000000000000000000004",
"0xD9692f1748aFEe00FACE2da35242417dd05a8615",
"0x231d45b53C905c3d6201318156BDC725c9c3B9B1",
"0x6D5871D41F9c38BBa419D28b001260fC9c7071Ba",
"0x186C0C26c45A8DA1Da34339ee513624a9609156d",
],
},
# TODO: Move StarkNet out of EVMNetwork

@@ -369,0 +396,0 @@ 23448594291968334: {

@@ -61,10 +61,5 @@ import logging

def check_http_error(self, response: ResponseContextManager) -> None:
if response.request is not None:
self.logger.debug(f"Request: {response.request.method} {response.request.url_split}")
if response.request.body is not None:
self.logger.debug(f"{response.request.body}")
"""Check the response for errors."""
if response.status_code != 200:
self.logger.info(f"Request failed with {response.status_code} code")
self.logger.error(f"Request failed with {response.status_code} code")
self.logger.debug(

@@ -71,0 +66,0 @@ f"Request to {response.url} failed with HTTP Error {response.status_code} code: {response.text}"

@@ -0,1 +1,2 @@

import logging
import random

@@ -97,3 +98,3 @@ import typing as t

params: list[t.Any] | dict | None = None,
name: str = "",
name: str | None = None,
path: str = "",

@@ -107,4 +108,3 @@ ) -> None:

rpc_call = RpcCall(method, params)
name = method
else:
if name is None:
name = rpc_call.method

@@ -117,2 +117,6 @@

self.check_json_rpc_response(response, name=name)
if logging.getLogger("locust").level == logging.DEBUG:
self.logger.debug(f"jsonrpc: {rpc_call.method} - params: {rpc_call.params}, response: {response.text}")
else:
self.logger.info(f"jsonrpc: {rpc_call.method} - params: {rpc_call.params}")

@@ -119,0 +123,0 @@ def make_batch_rpc_call(self, rpc_calls: list[RpcCall], name: str = "", path: str = "") -> None:

@@ -113,3 +113,6 @@ import logging

def method_to_task_function(cls, method: str) -> t.Callable:
return getattr(cls, f"{method}_task")
if not method.endswith("task"):
return getattr(cls, f"{method}_task")
else:
return getattr(cls, method)

@@ -336,3 +339,2 @@ def eth_v1_beacon_states_head_fork_task(self):

def run_task(self) -> None:
self.logger.info(f"Running task {self.environment.parsed_options.method}")
self.method_to_task_function(self.environment.parsed_options.method)(self)

@@ -339,0 +341,0 @@

@@ -325,3 +325,26 @@ import re

def debug_trace_block(self) -> RpcCall:
return RpcCall(method="debug_traceBlock", params=self._block_params_factory())
return RpcCall(
method="debug_traceBlock",
params=[
"0xf9036cf90224a071186c550133ded9590e8ab51136f315dc258ad601bbac062aacf506bbf2edffa01dcc4de8dec75d7aab85"
"b567b6ccd41ad312451b948a7413f0a142fd40d4934794a4b000000000000000000073657175656e636572a0686a9910da075d"
"d39e8a68979d3027a36403cd09a7c435bc22a2919c85b2852ca0a2ba0585dbe8234916933d1763c7df54c93cb28045470a1392"
"b4105351198f97a0d9da07a58821dd91f1ab783e20ab822c09faa19baa8c8ee4365596856d559e10b901000000000000000000"
"000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000"
"000020000000000000000000000000000000000000000000000000400000000000000000000000000000000080100000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"
"000000000000000000000000000000000000000000000000000000000000200000002000000000000000000000000000000000"
"0000200000000000000010000010000000000000000800000000000000000000000000000000000000000000018406b98aab87"
"04000000000000830babdb8464b7f075a0751711edf20569a455e8a526f9c4dec4768c135fb345a964a257bc72bbe9a3e0a000"
"00000000014a6e00000000010e8119000000000000000a00000000000000008800000000000f00198405f5e100f90141b88c6a"
"f88982a4b1b8846bf6a42d00000000000000000000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000010e8119000000000000000000000000000000000000000000000000000000"
"0006b98aab0000000000000000000000000000000000000000000000000000000000000001b8b102f8ae82a4b1088084080bef"
"c0830ec8bc94ff970a61a04b1ca14834a43f5de4533ebddb5cc880b844095ea7b300000000000000000000000053bf833a5d6c"
"4dda888f69c22c88c9f356a416140000000000000000000000000000000000000000000000000000000000c5bcdfc001a0a2a8"
"c4eb9756429430d1b9c2ed32ec76c075211cac98a4c142014ec15aef5f09a02c3d9d670f4c8ed9eca5b035b4ea366f0753e8a0"
"680b2b84dd5444c1cf0d06f6c0",
{"tracer": "callTracer", "timeout": "10s"},
],
)

@@ -340,9 +363,12 @@ def debug_trace_block_by_hash(self) -> RpcCall:

def debug_storage_range_at(self) -> RpcCall:
block = self.test_data.get_random_block(self.rng.get_rng())
contract = block.txs[0]["to"]
return RpcCall(
method="debug_storageRangeAt",
params=[
self.test_data.get_random_block_hash(self.rng.get_rng()),
self.test_data.get_random_account(self.rng.get_rng()),
hex(self.rng.get_rng().random.randint(0, 20)),
hex(self.rng.get_rng().random.randint(0, 20)),
block.block_hash,
0,
contract,
"0x00000000000000000000000000000000",
self.rng.get_rng().random.randint(1, 20),
],

@@ -349,0 +375,0 @@ )

@@ -33,6 +33,6 @@ import subprocess

"""Get list of profiles in given directory."""
from locust.argument_parser import find_locustfiles
from locust.argument_parser import parse_locustfile_paths
result = []
for locustfile in find_locustfiles([profile_dir.__str__()], True):
for locustfile in parse_locustfile_paths([profile_dir.__str__()]):
locustfile_path = Path(locustfile).relative_to(profile_dir)

@@ -125,2 +125,3 @@ if locustfile_path.parent.__str__() != ".":

target: str
shape_path: Path | None = None
headless: bool = False

@@ -141,4 +142,5 @@ timescale: bool = False

"""Generate master command."""
profile_args = f"{self.profile_path},{self.shape_path}" if self.shape_path else self.profile_path
command = (
f"locust -f {self.profile_path} --master "
f"locust -f {profile_args} --master "
f"--master-bind-host {self.host} --master-bind-port {self.port} "

@@ -160,4 +162,5 @@ f"--web-host {self.host} "

"""Generate worker command."""
profile_args = f"{self.profile_path},{self.shape_path}" if self.shape_path else self.profile_path
command = (
f"locust -f {self.profile_path} --worker --master-host {self.host} --master-port {self.port} "
f"locust -f {profile_args} --worker --master-host {self.host} --master-port {self.port} "
f"--logfile {self.results_path}/worker_{worker_id}.log --loglevel {self.log_level} --stop-timeout 30"

@@ -164,0 +167,0 @@ )

@@ -52,3 +52,2 @@ import logging

else:
logger.debug(f"Response: {self.content}")
return data

@@ -55,0 +54,0 @@

Metadata-Version: 2.1
Name: chainbench
Version: 0.8.0
Version: 0.8.2
Summary:

@@ -15,9 +15,9 @@ Author: Egor Molodik

Requires-Dist: click (>=8.1.6,<9.0.0)
Requires-Dist: locust (>=2.24.1,<3.0.0)
Requires-Dist: locust-plugins[dashboards] (>=4.4.2,<5.0.0)
Requires-Dist: orjson (>=3.10.6,<4.0.0)
Requires-Dist: solders (>=0.21.0,<0.22.0)
Requires-Dist: tenacity (>=8.2.2,<9.0.0)
Requires-Dist: locust (>=2.32.0,<3.0.0)
Requires-Dist: locust-plugins[dashboards] (>=4.5.3,<5.0.0)
Requires-Dist: orjson (>=3.10.10,<4.0.0)
Requires-Dist: solders (>=0.22.0,<0.23.0)
Requires-Dist: tenacity (>=9.0.0,<10.0.0)
Requires-Dist: websocket-client (>=1.8.0,<2.0.0)
Requires-Dist: wsaccel (>=0.6.6,<0.7.0)
Requires-Dist: wsaccel (>=0.6.7,<0.7.0)
Description-Content-Type: text/markdown

@@ -139,3 +139,5 @@

- `-p, --profile`: Specifies the profile to use for the benchmark. Available profiles can be found in the profile directory. Sample usage `-p bsc.general`
- `-s, --shape`: Specifies the shape of the load pattern. List available shapes with `chainbench list shapes`.
- `-u, --users`: Sets the number of simulated users to use for the benchmark.
- `-r, --spawn-rate`: Sets the spawn rate of users per second.
- `-w, --workers`: Sets the number of worker threads to use for the benchmark.

@@ -183,2 +185,11 @@ - `-t, --test-time`: Sets the duration of the test to run.

## Load Pattern Shapes
Load pattern shapes are used to define how the load will be distributed over time. You may specify the shape of the load pattern using the `-s` or `--shape` flag.
This is an optional flag; if not specified, the default shape is used. The default shape is `ramp-up`, which means the load increases linearly over time at
the spawn rate until the specified number of users is reached; after that, it maintains that number of users until the test duration is over.
Other available shapes are:
- `step` - The load will increase in steps. `--spawn-rate` flag is required to specify the step size. The number of steps will be calculated based on `--users` divided by `--spawn-rate`. The duration of each step will be calculated based on `--test-time` divided by the number of steps.
- `spike` - The load will run in a spike pattern. The load will ramp up to 10% of the total users for 40% of the test duration and then spike to 100% of the total users as specified by `--users` for 20% of test duration and then reduce back to 10% of total users until the test duration is over.
### Test Data Size

@@ -185,0 +196,0 @@ You may specify the test data size using the `--size` flag. This will determine how much data is used in the test.

[tool.poetry]
name = "chainbench"
version = "0.8.0"
version = "0.8.2"
description = ""

@@ -15,11 +15,11 @@ authors = [

python = "^3.10"
locust = "^2.24.1"
locust = "^2.32.0"
click = "^8.1.6"
locust-plugins = {extras = ["dashboards"], version = "^4.4.2"}
tenacity = "^8.2.2"
locust-plugins = {extras = ["dashboards"], version = "^4.5.3"}
tenacity = "^9.0.0"
base58 = "^2.1.1"
solders = "^0.21.0"
solders = "^0.22.0"
websocket-client = "^1.8.0"
orjson = "^3.10.6"
wsaccel = "^0.6.6"
orjson = "^3.10.10"
wsaccel = "^0.6.7"

@@ -26,0 +26,0 @@ [tool.poetry.group.dev.dependencies]

@@ -115,3 +115,5 @@ <img width="1200" alt="Labs" src="https://user-images.githubusercontent.com/99700157/213291931-5a822628-5b8a-4768-980d-65f324985d32.png">

- `-p, --profile`: Specifies the profile to use for the benchmark. Available profiles can be found in the profile directory. Sample usage `-p bsc.general`
- `-s, --shape`: Specifies the shape of the load pattern. List available shapes with `chainbench list shapes`.
- `-u, --users`: Sets the number of simulated users to use for the benchmark.
- `-r, --spawn-rate`: Sets the spawn rate of users per second.
- `-w, --workers`: Sets the number of worker threads to use for the benchmark.

@@ -159,2 +161,11 @@ - `-t, --test-time`: Sets the duration of the test to run.

## Load Pattern Shapes
Load pattern shapes are used to define how the load will be distributed over time. You may specify the shape of the load pattern using the `-s` or `--shape` flag.
This is an optional flag; if not specified, the default shape is used. The default shape is `ramp-up`, which means the load increases linearly over time at
the spawn rate until the specified number of users is reached; after that, it maintains that number of users until the test duration is over.
Other available shapes are:
- `step` - The load will increase in steps. `--spawn-rate` flag is required to specify the step size. The number of steps will be calculated based on `--users` divided by `--spawn-rate`. The duration of each step will be calculated based on `--test-time` divided by the number of steps.
- `spike` - The load will run in a spike pattern. The load will ramp up to 10% of the total users for 40% of the test duration and then spike to 100% of the total users as specified by `--users` for 20% of test duration and then reduce back to 10% of total users until the test duration is over.
### Test Data Size

@@ -161,0 +172,0 @@ You may specify the test data size using the `--size` flag. This will determine how much data is used in the test.