[spellcheck] Part 3: Spell check directories debug_scripts and docs (#…
shreyan-gupta authored Jan 24, 2025
1 parent 5b32984 commit e268099
Showing 57 changed files with 459 additions and 414 deletions.
1 change: 1 addition & 0 deletions cspell.json
@@ -63,6 +63,7 @@
"deallocate",
"deallocating",
"deallocation",
"dedup",
"Demultiplexer",
"demultiplexing",
"Demux",
8 changes: 4 additions & 4 deletions debug_scripts/READEME.md
@@ -1,14 +1,14 @@
# Debug Scripts

## Contents

* request_chain_info.py

This script can be used to request blockchain info (see the sketch after this list).
* send_validator_logs.py


This script can be used to send validator logs to a Pagoda S3 bucket when issues are encountered. The Pagoda team can use these logs to help validators troubleshoot issues.
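
For orientation, here is a minimal sketch of the JSON-RPC request that `request_chain_info.py` issues (the payload shape mirrors the script's diff further down; the URL and parameter values are illustrative, not the script's only options):

```python
import requests

# Illustrative sketch: fetch the latest final block from a NEAR RPC node.
# The real script assembles `url`, `method`, and `params` from CLI arguments.
url = "https://rpc.mainnet.near.org"
payload = {
    "jsonrpc": "2.0",
    "id": "dontcare",
    "method": "block",
    "params": {"finality": "final"},
}

response = requests.post(url, json=payload)
print(response.json()["result"])
```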


## Instructions to run

```
@@ -21,7 +21,9 @@
```

## Instructions to run tests

Add `nearcore/debug_scripts` to your `PYTHONPATH`:

```
export PYTHONPATH="<absolute path>/nearcore/debug_scripts:$PYTHONPATH"
```
@@ -32,5 +34,3 @@ python3 -m pipenv sync
python3 -m pipenv shell
python3 -m unittest tests.send_validator_logs_test
```


92 changes: 53 additions & 39 deletions debug_scripts/estimate_epoch_start_time.py
@@ -14,21 +14,22 @@ def get_block(url, block_hash):
"method": "block",
}

payload["params"] = {
payload["params"] = ({
"block_id": block_hash
} if block_hash is not None else {
"finality": "final"
- }
+ })

response = requests.post(url, json=payload)
- return response.json()['result']['header']
+ return response.json()["result"]["header"]


def ns_to_seconds(ns):
return ns / 1e9


def format_time(seconds):
+ # cspell:words gmtime
return time.strftime("%H hours, %M minutes", time.gmtime(seconds))
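
The script treats a block header's `next_epoch_id` as the hash of the previous epoch's first block and walks backwards one epoch at a time, as the hunks below show. A sketch of that walk, reusing `get_block` from this hunk (field semantics as used in this file):

```python
def walk_epoch_start_timestamps(url, num_epochs):
    """Yield epoch-start timestamps (ns), newest first. A sketch of the
    backwards walk that get_exponential_weighted_epoch_lengths performs."""
    header = get_block(url, None)  # latest final block
    epoch_start_hash = header["next_epoch_id"]  # first block of current epoch
    for _ in range(num_epochs):
        header = get_block(url, epoch_start_hash)
        yield int(header["timestamp"])
        epoch_start_hash = header["next_epoch_id"]
```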


@@ -45,14 +46,14 @@ def get_exponential_weighted_epoch_lengths(url,
block_data = get_block(url, current_hash)

# Get the timestamp of this block (start of current epoch)
- current_timestamp = int(block_data['timestamp'])
+ current_timestamp = int(block_data["timestamp"])

# Get the next epoch hash (last block hash of previous epoch.)
- previous_hash = block_data['next_epoch_id']
+ previous_hash = block_data["next_epoch_id"]

# Fetch the block data for start of previous epoch
previous_block_data = get_block(url, previous_hash)
- previous_timestamp = int(previous_block_data['timestamp'])
+ previous_timestamp = int(previous_block_data["timestamp"])

# Calculate the length of the epoch in nanoseconds
epoch_length = current_timestamp - previous_timestamp
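
The folded remainder of this function reduces the collected epoch lengths to a single exponentially weighted average. The exact weighting is hidden in the fold; a plausible sketch, assuming each older epoch's weight decays by a factor of `(1 - decay_rate)`:

```python
def exponential_weighted_average(epoch_lengths, decay_rate):
    # Assumes epoch_lengths[0] is the most recent epoch; the script's
    # actual weighting may differ.
    weights = [(1 - decay_rate) ** i for i in range(len(epoch_lengths))]
    return sum(w * l for w, l in zip(weights, epoch_lengths)) / sum(weights)
```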
@@ -106,14 +107,14 @@ def predict_future_epochs(starting_epoch_timestamp, avg_epoch_length,
target_timezone)

# Format date
- future_date = future_datetime.strftime('%Y-%m-%d %H:%M:%S %Z%z %A')
+ future_date = future_datetime.strftime("%Y-%m-%d %H:%M:%S %Z%z %A")
print(f"Predicted start of epoch {i}: {future_date}")

return future_epochs


def find_epoch_for_timestamp(future_epochs, voting_timestamp):
- for (epoch_number, epoch_timestamp) in enumerate(future_epochs):
+ for epoch_number, epoch_timestamp in enumerate(future_epochs):
if voting_timestamp < epoch_timestamp:
return epoch_number
return len(future_epochs)
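
`find_epoch_for_timestamp` returns the index of the first predicted epoch that starts after the given timestamp, and `len(future_epochs)` when the timestamp lies beyond every prediction. A tiny illustration with made-up values:

```python
# Hypothetical epoch-start timestamps in seconds; not real chain data.
future_epochs = [1_700_000_000, 1_700_043_200, 1_700_086_400]

assert find_epoch_for_timestamp(future_epochs, 1_700_050_000) == 2
assert find_epoch_for_timestamp(future_epochs, 1_800_000_000) == 3
```
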
Expand All @@ -129,7 +130,7 @@ def find_best_voting_hour(voting_date_str, future_epochs):
for hour in range(24):
# Construct datetime for each hour of the voting date
voting_datetime = datetime.strptime(
f"{voting_date_str} {hour:02d}:00:00", '%Y-%m-%d %H:%M:%S')
f"{voting_date_str} {hour:02d}:00:00", "%Y-%m-%d %H:%M:%S")
voting_datetime = pytz.utc.localize(voting_datetime)
voting_timestamp = voting_datetime.timestamp()

@@ -159,7 +160,7 @@ def find_best_voting_hour(voting_date_str, future_epochs):
print(
f"\nVoting hours on {voting_date_str} UTC that result in upgrade during working hours (UTC {WORKING_HOURS_START}:00-{WORKING_HOURS_END}:00):"
)
- for (hour, epoch) in valid_hours:
+ for hour, epoch in valid_hours:
print(f"- {hour:02d}:00, Upgrade Epoch: {epoch}")
else:
print(
@@ -169,7 +170,7 @@

def valid_voting_datetime(s):
try:
- dt = datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
+ dt = datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
return dt
except ValueError:
raise argparse.ArgumentTypeError(
@@ -199,7 +200,7 @@ def find_protocol_upgrade_time(voting_date, future_epochs, target_timezone):
protocol_upgrade_datetime = datetime.fromtimestamp(
protocol_upgrade_timestamp, tz=target_timezone)
protocol_upgrade_formatted = protocol_upgrade_datetime.strftime(
- '%Y-%m-%d %H:%M:%S %Z%z %A')
+ "%Y-%m-%d %H:%M:%S %Z%z %A")
print(f"\nVoting date falls into epoch {epoch_T}.")
print(
f"Protocol upgrade will happen at the start of epoch {protocol_upgrade_epoch_number}: {protocol_upgrade_formatted}"
@@ -209,19 +210,24 @@ def find_protocol_upgrade_time(voting_date, future_epochs, target_timezone):
# Main function to run the process
def main(args):
latest_block = get_block(args.url, None)
- next_epoch_id = latest_block['next_epoch_id']
+ next_epoch_id = latest_block["next_epoch_id"]
current_epoch_first_block = get_block(args.url, next_epoch_id)
- current_timestamp = int(current_epoch_first_block['timestamp']
+ current_timestamp = int(current_epoch_first_block["timestamp"]
) # Current epoch start timestamp in nanoseconds

# Get epoch lengths and the exponential weighted average
- epoch_lengths, exponential_weighted_average_epoch_length = get_exponential_weighted_epoch_lengths(
- args.url, next_epoch_id, args.num_past_epochs, args.decay_rate)
+ epoch_lengths, exponential_weighted_average_epoch_length = (
+ get_exponential_weighted_epoch_lengths(args.url, next_epoch_id,
+ args.num_past_epochs,
+ args.decay_rate))

# Predict future epoch start dates
future_epochs = predict_future_epochs(
- current_timestamp, exponential_weighted_average_epoch_length,
- args.num_future_epochs, args.timezone)
+ current_timestamp,
+ exponential_weighted_average_epoch_length,
+ args.num_future_epochs,
+ args.timezone,
+ )

if args.voting_date:
find_protocol_upgrade_time(args.voting_date, future_epochs,
@@ -234,10 +240,10 @@ def main(args):
class SetURLFromChainID(argparse.Action):

def __call__(self, parser, namespace, values, option_string=None):
- if values == 'mainnet':
- setattr(namespace, 'url', 'https://archival-rpc.mainnet.near.org')
- elif values == 'testnet':
- setattr(namespace, 'url', 'https://archival-rpc.testnet.near.org')
+ if values == "mainnet":
+ setattr(namespace, "url", "https://archival-rpc.mainnet.near.org")
+ elif values == "testnet":
+ setattr(namespace, "url", "https://archival-rpc.testnet.near.org")


# Set up command-line argument parsing
@@ -249,39 +255,47 @@ def __call__(self, parser, namespace, values, option_string=None):
group.add_argument("--url", help="The RPC URL to query.")
group.add_argument(
"--chain_id",
- choices=['mainnet', 'testnet'],
+ choices=["mainnet", "testnet"],
action=SetURLFromChainID,
help=
"The chain ID (either 'mainnet' or 'testnet'). Sets the corresponding URL."
"The chain ID (either 'mainnet' or 'testnet'). Sets the corresponding URL.",
)

parser.add_argument("--num_past_epochs",
type=int,
default=4,
help="Number of past epochs to analyze.")
parser.add_argument("--decay_rate",
type=float,
default=0.1,
help="Decay rate for exponential weighting.")
parser.add_argument("--num_future_epochs",
type=int,
default=10,
help="Number of future epochs to predict.")
parser.add_argument(
"--num_past_epochs",
type=int,
default=4,
help="Number of past epochs to analyze.",
)
parser.add_argument(
"--decay_rate",
type=float,
default=0.1,
help="Decay rate for exponential weighting.",
)
parser.add_argument(
"--num_future_epochs",
type=int,
default=10,
help="Number of future epochs to predict.",
)
parser.add_argument(
"--timezone",
type=valid_timezone,
default="UTC",
help="Time zone to display times in (e.g., 'America/New_York').")
help="Time zone to display times in (e.g., 'America/New_York').",
)
# Voting date arguments
voting_group = parser.add_mutually_exclusive_group()
voting_group.add_argument(
"--voting_date",
type=valid_voting_datetime,
help="Voting date in 'YYYY-MM-DD HH:MM:SS' format.")
help="Voting date in 'YYYY-MM-DD HH:MM:SS' format.",
)
voting_group.add_argument(
"--voting_date_day",
help=
"Voting date (day) in 'YYYY-MM-DD' format to find voting hours resulting in upgrade during working hours."
"Voting date (day) in 'YYYY-MM-DD' format to find voting hours resulting in upgrade during working hours.",
)

args = parser.parse_args()
62 changes: 31 additions & 31 deletions debug_scripts/request_chain_info.py
@@ -4,64 +4,64 @@
import json
import argparse

- if __name__ == '__main__':
+ if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description='This is a script to request for blockchain info')
- parser.add_argument('--chain',
- choices=['mainnet', 'testnet', 'betanet'],
+ description="This is a script to request for blockchain info")
+ parser.add_argument("--chain",
+ choices=["mainnet", "testnet", "betanet"],
required=True)
- parser.add_argument('--archive',
- action='store_true',
- help='whether to request from archival nodes')
- parser.add_argument('--method',
- choices=['block', 'chunk'],
+ parser.add_argument("--archive",
+ action="store_true",
+ help="whether to request from archival nodes")
+ parser.add_argument("--method",
+ choices=["block", "chunk"],
required=True,
- help='type of request')
- parser.add_argument('--block_id',
+ help="type of request")
+ parser.add_argument("--block_id",
type=str,
- help='block id, can be either block height or hash')
- parser.add_argument('--shard_id', type=int, help='shard id for the chunk')
- parser.add_argument('--chunk_id', type=str, help='chunk hash')
- parser.add_argument('--result_key',
+ help="block id, can be either block height or hash")
+ parser.add_argument("--shard_id", type=int, help="shard id for the chunk")
+ parser.add_argument("--chunk_id", type=str, help="chunk hash")
+ parser.add_argument("--result_key",
type=str,
- nargs='*',
- help='filter results by these keys')
+ nargs="*",
+ help="filter results by these keys")
args = parser.parse_args()

- url = 'https://{}.{}.near.org'.format(
- 'archival-rpc' if args.archive else 'rpc', args.chain)
+ url = "https://{}.{}.near.org".format(
+ "archival-rpc" if args.archive else "rpc", args.chain)

def get_block_id(block_id):
if block_id.isnumeric():
return int(block_id)
return block_id

- if args.method == 'block':
+ if args.method == "block":
if args.block_id is not None:
- params = {'block_id': get_block_id(args.block_id)}
+ params = {"block_id": get_block_id(args.block_id)}
else:
- params = {'finality': 'final'}
- elif args.method == 'chunk':
+ params = {"finality": "final"}
+ elif args.method == "chunk":
if args.shard_id is not None:
assert args.block_id is not None
params = {
- 'shard_id': args.shard_id,
- 'block_id': get_block_id(args.block_id)
+ "shard_id": args.shard_id,
+ "block_id": get_block_id(args.block_id),
}
elif args.chunk_id is not None:
- params = {'chunk_id': args.chunk_id}
+ params = {"chunk_id": args.chunk_id}
else:
assert False

payload = {
- 'jsonrpc': '2.0',
- 'id': 'dontcare',
- 'method': args.method,
- 'params': params
+ "jsonrpc": "2.0",
+ "id": "dontcare",
+ "method": args.method,
+ "params": params,
}

response = requests.post(url, json=payload)
- result = response.json()['result']
+ result = response.json()["result"]
if args.result_key is not None:
for key in args.result_key:
result = result[key]
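
The `--result_key` flag drills into the JSON result one key at a time, as the loop above shows. For example, with hypothetical response data:

```python
# e.g. --method block --result_key header height
result = {"header": {"height": 112_000_000, "epoch_id": "11111111"}}
for key in ["header", "height"]:
    result = result[key]
print(result)  # -> 112000000
```
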
29 changes: 17 additions & 12 deletions debug_scripts/send_validator_logs.py
@@ -50,35 +50,40 @@ def upload_to_s3(file_lines: list, account: str) -> str:
for line in file_lines:
file_string.write(line)

+ # cspell:words getsizeof
file_obj = io.BytesIO(file_string.getvalue().encode())
gzipped_content = gzip.compress(file_obj.read())
print(
f"uploading compressed file. File size is: {sys.getsizeof(gzipped_content)} Bytes"
)

- s3 = boto3.resource('s3')
+ s3 = boto3.resource("s3")
s3.Bucket(BUCKET).upload_fileobj(io.BytesIO(gzipped_content),
f"logs/{s3_destination}")
s3_link = f"https://{BUCKET}.s3.amazonaws.com/logs/{urllib.parse.quote(s3_destination)}"
s3_link = (
f"https://{BUCKET}.s3.amazonaws.com/logs/{urllib.parse.quote(s3_destination)}"
)
print(f"Log File was uploaded to S3: {s3_link}")
file_obj.close()
return s3_link
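
`upload_to_s3` compresses the log text in memory before streaming it to S3, as the hunk above shows. A condensed sketch of that pattern (bucket and key are placeholders):

```python
import gzip
import io

import boto3


def upload_gzipped_text(text: str, bucket: str, key: str) -> None:
    # Compress the UTF-8-encoded text in memory, then stream it to S3
    # without touching the local filesystem.
    gzipped = gzip.compress(text.encode())
    boto3.resource("s3").Bucket(bucket).upload_fileobj(io.BytesIO(gzipped), key)
```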


- if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Send logs to near.')
- parser.add_argument('--log_file',
+ if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Send logs to near.")
+ parser.add_argument("--log_file",
type=str,
- help='Absolute path to log file.',
+ help="Absolute path to log file.",
required=True)
- parser.add_argument('--account',
+ parser.add_argument("--account",
type=str,
- help='Near account id.',
- required=True)
- parser.add_argument('--last_seconds',
- type=int,
- help='Filter logs for last x seconds.',
+ help="Near account id.",
required=True)
+ parser.add_argument(
+ "--last_seconds",
+ type=int,
+ help="Filter logs for last x seconds.",
+ required=True,
+ )
args = parser.parse_args()

log_file_path = args.log_file