diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 3e0491d64f..9193748bba 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -36,4 +36,7 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL" # `std: # "found possibly newer version of crate `std` which `xyz` depends on rm -rf target/bpfel-unknown-unknown + if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then + rm -rf target/release + fi ) diff --git a/.gitignore b/.gitignore index 55a67b207b..eb9a4cb7ff 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ *.log log-*.txt log-*/ +!log-analyzer # intellij files /.idea/ diff --git a/.mergify.yml b/.mergify.yml index 8b4b89e0bb..70c4aad68f 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -50,14 +50,6 @@ pull_request_rules: label: add: - automerge - - name: v1.3 backport - conditions: - - label=v1.3 - actions: - backport: - ignore_conflicts: true - branches: - - v1.3 - name: v1.4 backport conditions: - label=v1.4 @@ -82,3 +74,11 @@ pull_request_rules: ignore_conflicts: true branches: - v1.6 + - name: v1.7 backport + conditions: + - label=v1.7 + actions: + backport: + ignore_conflicts: true + branches: + - v1.7 diff --git a/Cargo.lock b/Cargo.lock index 1e44782f6b..7acac8a6c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
+version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -62,9 +64,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" +checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" [[package]] name = "arc-swap" @@ -72,6 +74,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8" +[[package]] +name = "arc-swap" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e906254e445520903e7fc9da4f709886c84ae4bc4ddaf0e093188d66df4dc820" + [[package]] name = "arrayref" version = "0.3.6" @@ -102,9 +110,9 @@ checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" [[package]] name = "assert_cmd" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f57fec1ac7e4de72dcc69811795f1a7172ed06012f80a5d1ee651b62484f588" +checksum = "a88b6bd5df287567ffdf4ddf4d33060048e1068308e5f62d81c6f9824a045a48" dependencies = [ "bstr", "doc-comment", @@ -122,9 +130,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-stream" -version = "0.2.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22068c0c19514942eefcfd4daf8976ef1aad84e61539f95cd200c35202f80af5" +checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" dependencies = [ "async-stream-impl", "futures-core", @@ -132,13 +140,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.2.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" +checksum = 
"648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -149,7 +157,7 @@ checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -181,7 +189,7 @@ checksum = "da47c46001293a2c4b744d731958be22cff408a2ab76e2279328f9713b1267b4" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -194,7 +202,7 @@ dependencies = [ "derive_utils", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -211,15 +219,16 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backoff" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721c249ab59cbc483ad4294c9ee2671835c1e43e9ffc277e6b4ecfef733cfdc5" +checksum = "9fe17f59a06fe8b87a6fc8bf53bb70b3aba76d7685f432487a68cd5552853625" dependencies = [ "futures-core", + "getrandom 0.2.3", "instant", - "pin-project 0.4.28", - "rand 0.7.3", - "tokio 0.2.25", + "pin-project", + "rand 0.8.4", + "tokio 1.7.1", ] [[package]] @@ -297,13 +306,12 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.55.1" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b13ce559e6433d360c26305643803cb52cfbabbc2b9c47ce04a58493dfb443" +checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" dependencies = [ "bitflags", "cexpr", - "cfg-if 0.1.10", "clang-sys", "lazy_static 1.4.0", "lazycell", @@ -425,7 +433,7 @@ dependencies = [ "borsh-schema-derive-internal", "proc-macro-crate", "proc-macro2 1.0.27", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -436,7 +444,7 @@ checksum = "d2104c73179359431cc98e016998f2f23bc7a05bc53e79741bcba705f30047bc" dependencies = [ "proc-macro2 
1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -447,7 +455,7 @@ checksum = "ae29eb8418fcd46f723f8691a2ac06857d31179d33d2f2d91eb13967de97c728" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -470,9 +478,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bv" @@ -570,9 +578,9 @@ dependencies = [ [[package]] name = "bzip2-sys" -version = "0.1.10+1.0.8" +version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17fa3d1ac1ca21c5c4e36a97f3c3eb25084576f6fc47bf0139c1123434216c6c" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", "libc", @@ -803,9 +811,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" dependencies = [ "libc", ] @@ -830,7 +838,7 @@ dependencies = [ "clap", "criterion-plot", "csv", - "itertools 0.10.0", + "itertools 0.10.1", "lazy_static 1.4.0", "num-traits", "oorandom", @@ -885,7 +893,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.4", + "crossbeam-utils 0.8.5", ] [[package]] @@ -906,8 +914,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ "cfg-if 1.0.0", - "crossbeam-epoch 0.9.4", 
- "crossbeam-utils 0.8.4", + "crossbeam-epoch 0.9.5", + "crossbeam-utils 0.8.5", ] [[package]] @@ -927,12 +935,12 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.4", + "crossbeam-utils 0.8.5", "lazy_static 1.4.0", "memoffset 0.6.4", "scopeguard", @@ -962,11 +970,10 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg 1.0.1", "cfg-if 1.0.0", "lazy_static 1.4.0", ] @@ -997,6 +1004,16 @@ dependencies = [ "subtle 2.4.0", ] +[[package]] +name = "crypto-mac" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58bcd97a54c7ca5ce2f6eb16f6bede5b0ab5f0055fedc17d2f0b4466e21671ca" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.0", +] + [[package]] name = "crypto-mac" version = "0.10.0" @@ -1076,6 +1093,15 @@ dependencies = [ "rayon", ] +[[package]] +name = "derivation-path" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "193388a8c8c75a490b604ff61775e236541b8975e98e5ca1f6ea97d122b7e2db" +dependencies = [ + "failure", +] + [[package]] name = "derivative" version = "2.2.0" @@ -1084,7 +1110,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -1096,7 +1122,7 @@ dependencies = [ "convert_case", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 
1.0.72", + "syn 1.0.73", ] [[package]] @@ -1107,7 +1133,7 @@ checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -1237,18 +1263,44 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.0-pre.4" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek 2.1.2", + "curve25519-dalek 3.1.0", "ed25519", "rand 0.7.3", "serde", - "sha2 0.8.2", + "serde_bytes", + "sha2 0.9.5", "zeroize", ] +[[package]] +name = "ed25519-dalek-bip32" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057f328f31294b5ab432e6c39642f54afd1531677d6d4ba2905932844cc242f3" +dependencies = [ + "derivation-path", + "ed25519-dalek", + "failure", + "hmac 0.9.0", + "sha2 0.9.5", +] + +[[package]] +name = "educe" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" +dependencies = [ + "enum-ordinalize", + "proc-macro2 1.0.27", + "quote 1.0.9", + "syn 1.0.73", +] + [[package]] name = "either" version = "1.6.1" @@ -1293,7 +1345,20 @@ checksum = "1e94aa31f7c0dc764f57896dc615ddd76fc13b0d5dca7eb6cc5e018a5a09ec06" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", +] + +[[package]] +name = "enum-ordinalize" +version = "3.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2 1.0.27", + "quote 1.0.9", + "syn 1.0.73", ] [[package]] @@ -1308,9 +1373,9 @@ dependencies = [ [[package]] name = "env_logger" 
-version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ "atty", "humantime", @@ -1494,9 +1559,10 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", - "env_logger 0.8.3", + "env_logger 0.8.4", "evm-rpc", "evm-state", + "futures-util", "hex", "jsonrpc-core", "jsonrpc-core-client", @@ -1597,7 +1663,7 @@ dependencies = [ "fixed-hash", "hex", "impl-rlp", - "itertools 0.10.0", + "itertools 0.10.1", "keccak-hash", "lazy_static 1.4.0", "log 0.4.14", @@ -1607,7 +1673,7 @@ dependencies = [ "quickcheck 0.9.2", "quickcheck_macros 0.9.1", "rand 0.6.1", - "rand 0.8.3", + "rand 0.8.4", "rlp", "rocksdb", "secp256k1", @@ -1625,7 +1691,7 @@ name = "evm-utils" version = "0.1.0" dependencies = [ "bincode", - "env_logger 0.8.3", + "env_logger 0.8.4", "evm-rpc", "evm-state", "hex", @@ -1658,7 +1724,7 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "synstructure", ] @@ -1679,11 +1745,10 @@ dependencies = [ [[package]] name = "fd-lock" -version = "1.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15bec795244d49f5ee3024bdc6c3883fb035f7f6601d4a4821c3d5d60784454" +checksum = "0010f02effd88c702318c5dde0463206be67495d0b4d906ba7c0a8f166cc7f06" dependencies = [ - "failure", "libc", "winapi 0.3.9", ] @@ -1702,7 +1767,7 @@ checksum = "1d34cfa13a63ae058bfa601fe9e313bbdb3746427c1459185464ce0fcf62e1e8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.2.8", + "redox_syscall 0.2.9", "winapi 0.3.9", ] @@ -1713,7 +1778,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - 
"rand 0.8.3", + "rand 0.8.4", "rustc-hex", "static_assertions", ] @@ -1832,16 +1897,6 @@ version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" -[[package]] -name = "futures-cpupool" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = [ - "futures 0.1.31", - "num_cpus", -] - [[package]] name = "futures-executor" version = "0.3.15" @@ -1851,6 +1906,7 @@ dependencies = [ "futures-core", "futures-task", "futures-util", + "num_cpus", ] [[package]] @@ -1869,7 +1925,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -1891,6 +1947,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ "autocfg 1.0.1", + "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -1898,7 +1955,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.7", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -1997,9 +2054,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" +checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" dependencies = [ "aho-corasick", "bstr", @@ -2010,11 +2067,11 @@ dependencies = [ [[package]] name = "goauth" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c55b7ac37895bd6e4ca0b357c074248358c95e20cf1cf2b462603121f7b87" +checksum = 
"d94101e84ede813c04773b0a43396c01b5a3a9376537dbce1125858ae090ae60" dependencies = [ - "arc-swap", + "arc-swap 1.3.0", "futures 0.3.15", "log 0.4.14", "reqwest", @@ -2023,8 +2080,8 @@ dependencies = [ "serde_json", "simpl", "smpl_jwt", - "time 0.2.26", - "tokio 0.2.25", + "time 0.2.27", + "tokio 1.7.1", ] [[package]] @@ -2040,40 +2097,41 @@ dependencies = [ [[package]] name = "h2" -version = "0.1.26" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" dependencies = [ - "byteorder", - "bytes 0.4.12", + "bytes 0.5.6", "fnv", - "futures 0.1.31", - "http 0.1.21", + "futures-core", + "futures-sink", + "futures-util", + "http", "indexmap", - "log 0.4.14", "slab", - "string", - "tokio-io", + "tokio 0.2.25", + "tokio-util 0.3.1", + "tracing", + "tracing-futures", ] [[package]] name = "h2" -version = "0.2.7" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.4", + "http", "indexmap", "slab", - "tokio 0.2.25", - "tokio-util 0.3.1", + "tokio 1.7.1", + "tokio-util 0.6.7", "tracing", - "tracing-futures", ] [[package]] @@ -2117,18 +2175,18 @@ dependencies = [ [[package]] name = "heck" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] @@ -2172,6 +2230,26 @@ dependencies = [ "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deae6d9dbb35ec2c502d62b8f7b1c000a0822c3b0794ba36b3149c0a1c840dff" +dependencies = [ + "crypto-mac 0.9.1", + "digest 0.9.0", +] + [[package]] name = "hmac" version = "0.10.1" @@ -2193,17 +2271,6 @@ dependencies = [ "hmac 0.7.1", ] -[[package]] -name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa", -] - [[package]] name = "http" version = "0.2.4" @@ -2217,24 +2284,23 @@ dependencies = [ [[package]] name = "http-body" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "http 0.1.21", - "tokio-buf", + "bytes 0.5.6", + "http", ] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ - "bytes 0.5.6", - "http 0.2.4", + 
"bytes 1.0.1", + "http", + "pin-project-lite 0.2.7", ] [[package]] @@ -2249,6 +2315,12 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "httpdate" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" + [[package]] name = "humantime" version = "2.1.0" @@ -2276,85 +2348,78 @@ dependencies = [ [[package]] name = "hyper" -version = "0.12.36" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "futures-cpupool", - "h2 0.1.26", - "http 0.1.21", - "http-body 0.1.0", + "bytes 0.5.6", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.2.7", + "http", + "http-body 0.3.1", "httparse", - "iovec", + "httpdate 0.3.2", "itoa", - "log 0.4.14", - "net2", - "rustc_version 0.2.3", - "time 0.1.44", - "tokio 0.1.22", - "tokio-buf", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "want 0.2.0", + "pin-project", + "socket2 0.3.19", + "tokio 0.2.25", + "tower-service", + "tracing", + "want", ] [[package]] name = "hyper" -version = "0.13.10" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" +checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "futures-channel", "futures-core", "futures-util", - "h2 0.2.7", - "http 0.2.4", - "http-body 0.3.1", + "h2 0.3.3", + "http", + "http-body 0.4.2", "httparse", - "httpdate", + "httpdate 1.0.1", "itoa", - 
"pin-project 1.0.7", - "socket2", - "tokio 0.2.25", + "pin-project-lite 0.2.7", + "socket2 0.4.0", + "tokio 1.7.1", "tower-service", "tracing", - "want 0.3.0", + "want", ] [[package]] name = "hyper-rustls" -version = "0.21.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ - "bytes 0.5.6", "futures-util", - "hyper 0.13.10", + "hyper 0.14.9", "log 0.4.14", "rustls", - "tokio 0.2.25", + "tokio 1.7.1", "tokio-rustls", "webpki", ] [[package]] name = "hyper-tls" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 0.5.6", - "hyper 0.13.10", + "bytes 1.0.1", + "hyper 0.14.9", "native-tls", - "tokio 0.2.25", - "tokio-tls 0.3.1", + "tokio 1.7.1", + "tokio-native-tls", ] [[package]] @@ -2470,18 +2535,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" - -[[package]] -name = "itertools" -version = "0.8.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" -dependencies = [ - "either", -] +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" @@ -2494,9 +2550,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" +checksum = 
"69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" dependencies = [ "either", ] @@ -2559,29 +2615,33 @@ dependencies = [ [[package]] name = "jsonrpc-client-transports" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" +checksum = "a2f81014e2706fde057e9dcb1036cf6bbf9418d972c597be5c7158c984656722" dependencies = [ - "failure", - "futures 0.1.31", + "derive_more", + "futures 0.3.15", "jsonrpc-core", "jsonrpc-pubsub", + "jsonrpc-server-utils", "log 0.4.14", + "parity-tokio-ipc", "serde", "serde_json", - "tokio 0.1.22", + "tokio 0.2.25", "url 1.7.2", "websocket", ] [[package]] name = "jsonrpc-core" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" +checksum = "d4467ab6dfa369b69e52bd0692e480c4d117410538526a57a304a0f2250fd95e" dependencies = [ - "futures 0.1.31", + "futures 0.3.15", + "futures-executor", + "futures-util", "log 0.4.14", "serde", "serde_derive", @@ -2590,80 +2650,101 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" +checksum = "5c366c092d6bccc6e7ab44dd635a0f22ab2f201215339915fb7ff9508404f431" dependencies = [ + "futures 0.3.15", "jsonrpc-client-transports", ] [[package]] name = "jsonrpc-derive" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" +checksum = "34f6326966ebac440db89eba788f5a0e5ac2614b4b4bfbdc049a971e71040f32" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = 
"jsonrpc-http-server" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" +checksum = "522a047cac0958097ee71d047dd71cb84979fd2fa21c7a68fbe12736bef870a2" dependencies = [ - "hyper 0.12.36", + "futures 0.3.15", + "hyper 0.13.10", "jsonrpc-core", "jsonrpc-server-utils", "log 0.4.14", "net2", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "unicase 2.6.0", ] +[[package]] +name = "jsonrpc-ipc-server" +version = "17.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1d782052ef17051d12681bcc2fa2e9e1aabf3f634588125493d63ddcca6fe1" +dependencies = [ + "futures 0.3.15", + "jsonrpc-core", + "jsonrpc-server-utils", + "log 0.4.14", + "parity-tokio-ipc", + "parking_lot 0.11.1", + "tower-service", +] + [[package]] name = "jsonrpc-pubsub" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" +checksum = "14739e5523a40739882cc34a44ab2dd9356bce5ce102513f5984a9efbe342f3d" dependencies = [ + "futures 0.3.15", "jsonrpc-core", + "lazy_static 1.4.0", "log 0.4.14", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "rand 0.7.3", "serde", ] [[package]] name = "jsonrpc-server-utils" -version = "15.1.0" +version = "17.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" +checksum = "bce68fa279a2822b3619369cd024f8a4f8e5ce485468834f8679a3c7919aae2d" dependencies = [ - "bytes 0.4.12", + "bytes 0.5.6", + "futures 0.3.15", "globset", "jsonrpc-core", "lazy_static 1.4.0", "log 0.4.14", - "tokio 0.1.22", - "tokio-codec", + "tokio 0.2.25", + "tokio-util 0.3.1", "unicase 2.6.0", ] [[package]] name = "jsonrpc-ws-server" -version = "15.1.0" +version = "17.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" +checksum = "b1d267a8649ec37e4452dd7b3f48827c9cdf36de3a3539cf73242c222ba2eb50" dependencies = [ + "futures 0.3.15", "jsonrpc-core", "jsonrpc-server-utils", "log 0.4.14", "parity-ws", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "slab", ] @@ -2746,9 +2827,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "libloading" @@ -2772,8 +2853,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "6.15.4" -source = "git+https://github.com/rust-rocksdb/rust-rocksdb?rev=39b877b#39b877b41aac99f6accee814410c478878a79454" +version = "6.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9" dependencies = [ "bindgen", "cc", @@ -2909,18 +2991,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = [ - "mime 0.3.16", - "unicase 2.6.0", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.4" +name = "miniz_oxide" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ @@ -2949,9 +3021,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ "libc", "log 0.4.14", @@ -3107,6 +3179,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num-bigint" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e0d047c1062aa51e256408c560894e5251f08925980e53cf1aa5bd00eec6512" +dependencies = [ + "autocfg 1.0.1", + "num-integer", + "num-traits", +] + [[package]] name = "num-derive" version = "0.3.3" @@ -3115,7 +3198,7 @@ checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3166,7 +3249,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3189,9 +3272,9 @@ checksum = "4eae0151b9dacf24fcc170d9995e511669a082856a91f958a2fe380bfab3fb22" [[package]] name = "once_cell" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" dependencies = [ "parking_lot 0.11.1", ] @@ -3216,9 +3299,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.34" +version = "0.10.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" +checksum = "549430950c79ae24e6d02e0b7404534ecf311d94cc9f861e9e4020187d13d885" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3236,9 +3319,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.63" +version = "0.9.65" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" +checksum = "7a7907e3bfa08bb85105209cdfcb6c63d109f8f6c1ed6ca318fff5c1853fbc1d" dependencies = [ "autocfg 1.0.1", "cc", @@ -3266,7 +3349,7 @@ dependencies = [ "Inflector", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3291,7 +3374,23 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", +] + +[[package]] +name = "parity-tokio-ipc" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd7f6c69d7687501b2205fe51ade1d7b8797bb3aa141fe5bf13dd78c0483bc89" +dependencies = [ + "futures 0.3.15", + "libc", + "log 0.4.14", + "mio-named-pipes", + "miow 0.3.7", + "rand 0.7.3", + "tokio 0.2.25", + "winapi 0.3.9", ] [[package]] @@ -3382,7 +3481,7 @@ dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall 0.2.8", + "redox_syscall 0.2.9", "smallvec 1.6.1", "winapi 0.3.9", ] @@ -3430,7 +3529,7 @@ checksum = "0f35583365be5d148e959284f42526841917b7bfa09e2d1a7ad5dde2cf0eaa39" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3449,6 +3548,15 @@ dependencies = [ "crypto-mac 0.7.0", ] +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + [[package]] name = "pbkdf2" version = "0.6.0" @@ -3498,33 +3606,13 @@ dependencies = [ "serde_yaml", ] -[[package]] -name = "pin-project" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" -dependencies = [ - "pin-project-internal 0.4.28", -] - [[package]] name = "pin-project" version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" dependencies = [ - "pin-project-internal 1.0.7", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" -dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "pin-project-internal", ] [[package]] @@ -3535,7 +3623,7 @@ checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3546,9 +3634,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" [[package]] name = "pin-utils" @@ -3665,7 +3753,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "version_check 0.9.3", ] @@ -3712,37 +3800,46 @@ dependencies = [ [[package]] name = "prost" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost-derive", ] [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" 
dependencies = [ "anyhow", - "itertools 0.8.2", + "itertools 0.9.0", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "prost-types" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "prost", ] +[[package]] +name = "qstring" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" +dependencies = [ + "percent-encoding 2.1.0", +] + [[package]] name = "quickcheck" version = "0.9.2" @@ -3761,9 +3858,9 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger 0.8.3", + "env_logger 0.8.4", "log 0.4.14", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -3774,7 +3871,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3785,7 +3882,7 @@ checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -3876,14 +3973,14 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ 
-3908,12 +4005,12 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -3942,9 +4039,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom 0.2.3", ] @@ -3969,11 +4066,11 @@ dependencies = [ [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -4039,7 +4136,7 @@ checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ "crossbeam-channel 0.5.1", "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.4", + "crossbeam-utils 0.8.5", "lazy_static 1.4.0", "num_cpus", ] @@ -4061,9 +4158,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ "bitflags", ] @@ -4075,7 +4172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ "getrandom 0.2.3", - "redox_syscall 0.2.8", + "redox_syscall 0.2.9", ] [[package]] @@ -4102,12 +4199,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies = [ - "byteorder", -] +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-syntax" @@ -4126,18 +4220,18 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.10" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" +checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" dependencies = [ "base64 0.13.0", - "bytes 0.5.6", + "bytes 1.0.1", "encoding_rs", "futures-core", "futures-util", - "http 0.2.4", - "http-body 0.3.1", - "hyper 0.13.10", + "http", + "http-body 0.4.2", + "hyper 0.14.9", "hyper-rustls", "hyper-tls", "ipnet", @@ -4145,17 +4239,16 @@ dependencies = [ "lazy_static 1.4.0", "log 0.4.14", "mime 0.3.16", - "mime_guess", "native-tls", "percent-encoding 2.1.0", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.7", "rustls", "serde", "serde_json", "serde_urlencoded", - "tokio 0.2.25", + "tokio 1.7.1", + "tokio-native-tls", "tokio-rustls", - "tokio-tls 0.3.1", "url 2.2.2", "wasm-bindgen", "wasm-bindgen-futures", @@ -4214,13 +4307,14 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "rocksdb" -version = "0.15.0" -source = "git+https://github.com/rust-rocksdb/rust-rocksdb?rev=39b877b#39b877b41aac99f6accee814410c478878a79454" +version = "0.16.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" dependencies = [ "libc", "librocksdb-sys", @@ -4238,9 +4332,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce" +checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" [[package]] name = "rustc-hash" @@ -4280,11 +4374,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log 0.4.14", "ring", "sct", @@ -4351,7 +4445,7 @@ checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -4399,9 +4493,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ "bitflags", "core-foundation", @@ -4412,9 +4506,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" +checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" dependencies = [ "core-foundation-sys", "libc", @@ -4490,7 +4584,7 @@ checksum = 
"963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -4547,7 +4641,7 @@ checksum = "d08338d8024b227c62bd68a12c7c9883f5c66780abaef15c550dc56f46ee6515" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -4648,9 +4742,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] @@ -4703,18 +4797,18 @@ checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "smpl_jwt" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "547e9c1059500ce0fe6cfa325f868b5621214957922be60a49d86e3e844ee9dc" +checksum = "4370044f8b20f944e05c35d77edd3518e6f21fc4de77e593919f287c6a3f428a" dependencies = [ - "base64 0.12.3", + "base64 0.13.0", "log 0.4.14", "openssl", "serde", "serde_derive", "serde_json", "simpl", - "time 0.2.26", + "time 0.2.27", ] [[package]] @@ -4735,7 +4829,7 @@ checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -4749,9 +4843,19 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "socket2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "solana-account-decoder" -version = "1.5.19" +version = "1.6.14" dependencies = [ "Inflector", "base64 0.12.3", @@ -4774,23 +4878,47 @@ dependencies = [ [[package]] name = 
"solana-accounts-bench" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "crossbeam-channel 0.4.4", "log 0.4.14", "rand 0.7.3", "rayon", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-runtime", "solana-sdk", "solana-version", ] +[[package]] +name = "solana-accounts-cluster-bench" +version = "1.6.14" +dependencies = [ + "clap", + "log 0.4.14", + "rand 0.7.3", + "rayon", + "solana-account-decoder", + "solana-clap-utils", + "solana-client", + "solana-core", + "solana-faucet", + "solana-local-cluster", + "solana-logger 1.6.14", + "solana-measure", + "solana-net-utils", + "solana-runtime", + "solana-sdk", + "solana-transaction-status", + "solana-version", + "spl-token", +] + [[package]] name = "solana-banking-bench" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "crossbeam-channel 0.4.4", @@ -4800,7 +4928,7 @@ dependencies = [ "solana-clap-utils", "solana-core", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-perf", "solana-runtime", @@ -4811,54 +4939,55 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "borsh", "borsh-derive", "futures 0.3.15", - "mio 0.7.11", + "mio 0.7.13", "solana-banks-interface", "solana-banks-server", - "solana-program 1.5.19", + "solana-program 1.6.14", "solana-runtime", "solana-sdk", "tarpc", - "tokio 0.3.7", + "tokio 1.7.1", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "mio 0.7.11", + "mio 0.7.13", "serde", "solana-sdk", "tarpc", - "tokio 0.3.7", + "tokio 1.7.1", ] [[package]] name = "solana-banks-server" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "futures 0.3.15", "log 0.4.14", - "mio 0.7.11", + "mio 0.7.13", "solana-banks-interface", "solana-metrics", "solana-runtime", "solana-sdk", "tarpc", - "tokio 0.3.7", + "tokio 1.7.1", "tokio-serde", + 
"tokio-stream", ] [[package]] name = "solana-bench-exchange" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "itertools 0.9.0", @@ -4876,7 +5005,7 @@ dependencies = [ "solana-faucet", "solana-genesis", "solana-local-cluster", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-net-utils", "solana-runtime", @@ -4886,11 +5015,11 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "solana-clap-utils", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-net-utils", "solana-streamer", "solana-version", @@ -4898,7 +5027,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "clap", @@ -4907,14 +5036,13 @@ dependencies = [ "serde_json", "serde_yaml", "serial_test", - "serial_test_derive", "solana-clap-utils", "solana-client", "solana-core", "solana-faucet", "solana-genesis", "solana-local-cluster", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-metrics", "solana-net-utils", @@ -4944,7 +5072,7 @@ dependencies = [ "solana-faucet", "solana-genesis", "solana-local-cluster", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-metrics", "solana-net-utils", @@ -4955,17 +5083,18 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "byteorder", - "curve25519-dalek 3.1.0", "log 0.4.14", "num-derive", "num-traits", "rand 0.7.3", - "rand_core 0.6.2", + "rand_core 0.6.3", "rustversion", + "sha3 0.9.1", + "solana-measure", "solana-runtime", "solana-sdk", "solana_rbpf", @@ -4974,7 +5103,7 @@ dependencies = [ [[package]] name = "solana-budget-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "chrono", @@ -4990,16 +5119,19 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "1.5.19" +version = "1.6.14" 
dependencies = [ + "bzip2", "cargo_metadata", "clap", + "solana-download-utils", "solana-sdk", + "tar", ] [[package]] name = "solana-cargo-test-bpf" -version = "1.5.19" +version = "1.6.14" dependencies = [ "cargo_metadata", "clap", @@ -5007,22 +5139,23 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.5.19" +version = "1.6.14" dependencies = [ "chrono", "clap", "rpassword", "solana-remote-wallet", "solana-sdk", + "tempfile", "thiserror", - "tiny-bip39", + "tiny-bip39 0.8.0", "uriparse", "url 2.2.2", ] [[package]] name = "solana-cli" -version = "1.5.19" +version = "1.6.14" dependencies = [ "Inflector", "bincode", @@ -5051,7 +5184,7 @@ dependencies = [ "solana-config-program", "solana-core", "solana-faucet", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-net-utils", "solana-remote-wallet", "solana-sdk", @@ -5060,15 +5193,16 @@ dependencies = [ "solana-version", "solana-vote-program", "solana_rbpf", + "spl-memo", "tempfile", "thiserror", - "tiny-bip39", + "tiny-bip39 0.7.3", "url 2.2.2", ] [[package]] name = "solana-cli-config" -version = "1.5.19" +version = "1.6.14" dependencies = [ "dirs-next", "lazy_static 1.4.0", @@ -5080,7 +5214,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.5.19" +version = "1.6.14" dependencies = [ "Inflector", "base64 0.13.0", @@ -5098,11 +5232,12 @@ dependencies = [ "solana-stake-program", "solana-transaction-status", "solana-vote-program", + "spl-memo", ] [[package]] name = "solana-client" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_matches", "base64 0.13.0", @@ -5126,7 +5261,7 @@ dependencies = [ "solana-account-decoder", "solana-clap-utils", "solana-faucet", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-net-utils", "solana-sdk", "solana-stake-program", @@ -5134,27 +5269,28 @@ dependencies = [ "solana-version", "solana-vote-program", "thiserror", + "tokio 1.7.1", "tungstenite", "url 2.2.2", ] [[package]] name = "solana-config-program" -version 
= "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "chrono", "log 0.4.14", - "rand_core 0.6.2", + "rand_core 0.6.3", "serde", "serde_derive", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", ] [[package]] name = "solana-core" -version = "1.5.19" +version = "1.6.14" dependencies = [ "ahash 0.6.3", "anyhow", @@ -5164,7 +5300,6 @@ dependencies = [ "bs58", "bv", "byteorder", - "bytes 0.4.12", "chrono", "core_affinity", "crossbeam-channel 0.4.4", @@ -5182,6 +5317,7 @@ dependencies = [ "jsonrpc-http-server", "jsonrpc-pubsub", "jsonrpc-ws-server", + "libc", "log 0.4.14", "lru", "matches", @@ -5192,6 +5328,7 @@ dependencies = [ "primitive-types", "rand 0.7.3", "rand_chacha 0.2.2", + "rand_core 0.6.3", "raptorq", "rayon", "regex", @@ -5199,14 +5336,12 @@ dependencies = [ "retain_mut", "rlp", "rustc_version 0.2.3", - "rustversion", "secp256k1", "serde", "serde_bytes", "serde_derive", "serde_json", "serial_test", - "serial_test_derive", "sha3 0.9.1", "snafu", "solana-account-decoder", @@ -5215,10 +5350,10 @@ dependencies = [ "solana-client", "solana-evm-loader-program", "solana-faucet", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-merkle-tree", "solana-metrics", @@ -5236,21 +5371,20 @@ dependencies = [ "solana-version", "solana-vote-program", "spl-token", + "symlink", "systemstat", "tempfile", "thiserror", - "tokio 0.1.22", "tokio 0.2.25", - "tokio-codec", - "tokio-fs", - "tokio-io", + "tokio 1.7.1", + "tokio-util 0.3.1", "trees", "velas-account-program", ] [[package]] name = "solana-crate-features" -version = "1.5.19" +version = "1.6.14" dependencies = [ "backtrace", "bytes 0.4.12", @@ -5265,14 +5399,14 @@ dependencies = [ "reqwest", "serde", "syn 0.15.44", - "syn 1.0.72", + "syn 1.0.73", "tokio 0.1.22", "winapi 0.3.9", ] [[package]] name = "solana-dos" -version = 
"1.5.19" +version = "1.6.14" dependencies = [ "bincode", "clap", @@ -5283,7 +5417,7 @@ dependencies = [ "solana-client", "solana-core", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-net-utils", "solana-runtime", "solana-sdk", @@ -5292,7 +5426,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bzip2", "console 0.11.3", @@ -5327,13 +5461,13 @@ dependencies = [ "sha3 0.9.1", "simple_logger", "snafu", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", ] [[package]] name = "solana-exchange-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "log 0.4.14", @@ -5341,7 +5475,7 @@ dependencies = [ "num-traits", "serde", "serde_derive", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-runtime", "solana-sdk", @@ -5350,7 +5484,7 @@ dependencies = [ [[package]] name = "solana-failure-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "solana-runtime", "solana-sdk", @@ -5358,7 +5492,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "byteorder", @@ -5368,18 +5502,18 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-sdk", "solana-version", - "spl-memo 3.0.0", + "spl-memo", "thiserror", - "tokio 0.3.7", + "tokio 1.7.1", ] [[package]] name = "solana-frozen-abi" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bs58", "bv", @@ -5390,16 +5524,16 @@ dependencies = [ "serde", "serde_derive", "sha2 0.9.5", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "thiserror", ] [[package]] name = "solana-frozen-abi" -version = "1.6.10" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03201ac63bad0f18339e1631b75d10b18d4a0a0adef5551298174fb8346cf6de" +checksum = "dc00a9f7c3eb2fb8687d34ce6d8672fbf7bd8f67002a5f75ccd6f6c4e8cd8a91" dependencies = [ "bs58", "bv", @@ -5410,38 +5544,37 @@ dependencies = [ "serde", "serde_derive", "sha2 0.9.5", - "solana-frozen-abi-macro 1.6.10", - "solana-logger 1.6.10", + "solana-frozen-abi-macro 1.7.3", + "solana-logger 1.7.3", "thiserror", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.5.19" +version = "1.6.14" dependencies = [ "lazy_static 1.4.0", "proc-macro2 1.0.27", "quote 1.0.9", "rustc_version 0.2.3", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.6.10" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7817d332895c39ee895508c1327bef17eb35e34fbdc5b06ca4ae26649a0392d5" +checksum = "bc381a29ab68515e69dcfad633ab78dd98d83c0b959c2cae9a9a98df6e265acf" dependencies = [ - "lazy_static 1.4.0", "proc-macro2 1.0.27", "quote 1.0.9", "rustc_version 0.2.3", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "solana-genesis" -version = "1.5.19" +version = "1.6.14" dependencies = [ "base64 0.12.3", "chrono", @@ -5459,7 +5592,7 @@ dependencies = [ "solana-evm-loader-program", "solana-exchange-program", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-runtime", "solana-sdk", "solana-stake-program", @@ -5469,9 +5602,25 @@ dependencies = [ "tempfile", ] +[[package]] +name = "solana-keygen" +version = "1.6.14" +dependencies = [ + "bs58", + "clap", + "dirs-next", + "num_cpus", + "solana-clap-utils", + "solana-cli-config", + "solana-remote-wallet", + "solana-sdk", + "solana-version", + "tiny-bip39 0.7.3", +] + [[package]] name = "solana-ledger" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_matches", "bincode", @@ -5505,9 +5654,9 @@ dependencies = [ "solana-account-decoder", "solana-bpf-loader-program", "solana-budget-program", - "solana-frozen-abi 1.5.19", - 
"solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "solana-measure", "solana-merkle-tree", "solana-metrics", @@ -5522,13 +5671,14 @@ dependencies = [ "solana-vote-program", "tempfile", "thiserror", - "tokio 0.2.25", + "tokio 1.7.1", + "tokio-stream", "trees", ] [[package]] name = "solana-ledger-tool" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_cmd", "bs58", @@ -5550,7 +5700,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-output", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-runtime", "solana-sdk", @@ -5560,12 +5710,12 @@ dependencies = [ "solana-version", "solana-vote-program", "tempfile", - "tokio 0.2.25", + "tokio 1.7.1", ] [[package]] name = "solana-local-cluster" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_matches", "crossbeam-channel 0.4.4", @@ -5576,7 +5726,6 @@ dependencies = [ "rand 0.7.3", "rayon", "serial_test", - "serial_test_derive", "solana-client", "solana-config-program", "solana-core", @@ -5584,7 +5733,7 @@ dependencies = [ "solana-exchange-program", "solana-faucet", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-rayon-threadlimit", "solana-runtime", "solana-sdk", @@ -5596,40 +5745,40 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "1.5.19" +version = "1.6.14" dependencies = [ "byte-unit", "clap", "serde", "serde_json", "solana-clap-utils", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-version", ] [[package]] name = "solana-logger" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "env_logger 0.8.3", + "env_logger 0.8.4", "lazy_static 1.4.0", "log 0.4.14", ] [[package]] name = "solana-logger" -version = "1.6.10" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff64fd45c789b34870b58c48c27f2c137b95a446e3a359873608468d3efe77f" 
+checksum = "62f8e4921602f61681d8d29d2606d4f8e1c848d4f6b9964813bfc1b457dfd7ce" dependencies = [ - "env_logger 0.8.3", + "env_logger 0.8.4", "lazy_static 1.4.0", "log 0.4.14", ] [[package]] name = "solana-measure" -version = "1.5.19" +version = "1.6.14" dependencies = [ "jemalloc-ctl", "jemallocator", @@ -5640,11 +5789,11 @@ dependencies = [ [[package]] name = "solana-merkle-root-bench" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "log 0.4.14", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-runtime", "solana-sdk", @@ -5653,44 +5802,43 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.5.19" +version = "1.6.14" dependencies = [ "fast-math", "hex", "matches", - "solana-program 1.5.19", + "solana-program 1.6.14", ] [[package]] name = "solana-metrics" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "env_logger 0.8.3", + "env_logger 0.8.4", "gethostname", "lazy_static 1.4.0", "log 0.4.14", "rand 0.7.3", "reqwest", "serial_test", - "serial_test_derive", "solana-sdk", ] [[package]] name = "solana-net-shaper" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "rand 0.7.3", "serde", "serde_json", "solana-clap-utils", - "solana-logger 1.5.19", + "solana-logger 1.6.14", ] [[package]] name = "solana-net-utils" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "clap", @@ -5699,26 +5847,26 @@ dependencies = [ "rand 0.7.3", "serde", "serde_derive", - "socket2", + "socket2 0.3.19", "solana-clap-utils", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-version", - "tokio 0.3.7", + "tokio 1.7.1", "url 2.2.2", ] [[package]] name = "solana-noop-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "log 0.4.14", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", ] [[package]] name = "solana-notifier" -version = "1.5.19" +version = "1.6.14" dependencies = [ "log 0.4.14", "reqwest", @@ -5727,7 +5875,7 @@ dependencies = [ [[package]] name 
= "solana-ownable" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "num-derive", @@ -5739,7 +5887,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "curve25519-dalek 2.1.2", @@ -5752,7 +5900,7 @@ dependencies = [ "rayon", "serde", "solana-budget-program", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-metrics", "solana-rayon-threadlimit", @@ -5761,7 +5909,7 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "log 0.4.14", @@ -5769,7 +5917,7 @@ dependencies = [ "rayon", "solana-clap-utils", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-perf", "solana-sdk", @@ -5778,7 +5926,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_matches", "bincode", @@ -5802,18 +5950,19 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.9.5", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", - "solana-sdk-macro 1.5.19", + "sha3 0.9.1", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-sdk-macro 1.6.14", "thiserror", ] [[package]] name = "solana-program" -version = "1.6.10" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632dc4e769b8dbb1884167737fd390cfa4266159ce37229e6ed64d61aba192d6" +checksum = "9f9c454274436aac77286369e35835fafa1f79d1da1c4b7a1c662b2c41705f77" dependencies = [ "bincode", "blake3", @@ -5836,39 +5985,42 @@ dependencies = [ "serde_derive", "sha2 0.9.5", "sha3 0.9.1", - "solana-frozen-abi 1.6.10", - "solana-frozen-abi-macro 1.6.10", - "solana-logger 1.6.10", - "solana-sdk-macro 1.6.10", + "solana-frozen-abi 1.7.3", + "solana-frozen-abi-macro 1.7.3", + "solana-logger 1.7.3", + "solana-sdk-macro 1.7.3", "thiserror", ] 
[[package]] name = "solana-program-test" -version = "1.5.19" +version = "1.6.14" dependencies = [ + "assert_matches", "async-trait", "base64 0.12.3", + "bincode", "chrono", "chrono-humanize", "log 0.4.14", - "mio 0.7.11", + "mio 0.7.13", + "serde", + "serde_derive", "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.5.19", - "solana-program 1.5.19", + "solana-logger 1.6.14", "solana-runtime", "solana-sdk", "solana-stake-program", "solana-vote-program", "thiserror", - "tokio 0.3.7", + "tokio 1.7.1", ] [[package]] name = "solana-ramp-tps" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bzip2", "clap", @@ -5879,7 +6031,7 @@ dependencies = [ "serde_yaml", "solana-client", "solana-core", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-net-utils", "solana-notifier", @@ -5890,7 +6042,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.5.19" +version = "1.6.14" dependencies = [ "lazy_static 1.4.0", "num_cpus", @@ -5898,7 +6050,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.5.19" +version = "1.6.14" dependencies = [ "base32", "console 0.11.3", @@ -5908,15 +6060,16 @@ dependencies = [ "num-derive", "num-traits", "parking_lot 0.10.2", + "qstring", "semver 0.9.0", "solana-sdk", "thiserror", - "url 2.2.2", + "uriparse", ] [[package]] name = "solana-runtime" -version = "1.5.19" +version = "1.6.14" dependencies = [ "arrayref", "assert_matches", @@ -5932,7 +6085,6 @@ dependencies = [ "evm-state", "flate2", "fnv", - "fs_extra", "itertools 0.9.0", "lazy_static 1.4.0", "libc", @@ -5951,16 +6103,15 @@ dependencies = [ "serde_derive", "solana-config-program", "solana-evm-loader-program", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "solana-measure", "solana-metrics", "solana-noop-program", 
"solana-rayon-threadlimit", "solana-sdk", "solana-secp256k1-program", - "solana-sleep-program", "solana-stake-program", "solana-vote-program", "symlink", @@ -5973,7 +6124,7 @@ dependencies = [ [[package]] name = "solana-scripts" -version = "1.5.19" +version = "1.6.14" dependencies = [ "csv", "serde", @@ -5981,7 +6132,7 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.5.19" +version = "1.6.14" dependencies = [ "assert_matches", "bincode", @@ -5990,8 +6141,10 @@ dependencies = [ "byteorder", "chrono", "curve25519-dalek 2.1.2", + "derivation-path", "digest 0.9.0", "ed25519-dalek", + "ed25519-dalek-bip32", "evm-rpc", "evm-state", "generic-array 0.14.4", @@ -6007,8 +6160,10 @@ dependencies = [ "num-traits", "once_cell", "pbkdf2 0.6.0", + "qstring", "rand 0.7.3", "rand_chacha 0.2.2", + "rand_core 0.6.3", "rlp", "rustc_version 0.2.3", "rustversion", @@ -6019,51 +6174,52 @@ dependencies = [ "sha2 0.9.5", "sha3 0.9.1", "solana-crate-features", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", - "solana-program 1.5.19", - "solana-sdk-macro 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-program 1.6.14", + "solana-sdk-macro 1.6.14", "tempfile", "thiserror", - "tiny-bip39", + "tiny-bip39 0.7.3", "triehash", + "uriparse", ] [[package]] name = "solana-sdk-macro" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bs58", "proc-macro2 1.0.27", "quote 1.0.9", "rustversion", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "solana-sdk-macro" -version = "1.6.10" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6762b630db60c40e3efbb461cc945e0c5dfb7b0bcf719f76551f0f97f0c005" +checksum = "85ee9c0af66098ec40bf9012b7910c8cdb1ce8b95fc9fad90e6a0cbe692a48fe" dependencies = [ "bs58", "proc-macro2 1.0.27", "quote 1.0.9", "rustversion", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "solana-secp256k1-program" 
-version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "digest 0.9.0", "libsecp256k1", "rand 0.7.3", "sha3 0.9.1", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", ] @@ -6073,13 +6229,13 @@ version = "1.5.14" dependencies = [ "byteorder", "log 0.4.14", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", ] [[package]] name = "solana-stake-accounts" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "solana-clap-utils", @@ -6093,7 +6249,7 @@ dependencies = [ [[package]] name = "solana-stake-monitor" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "console 0.11.3", @@ -6101,13 +6257,12 @@ dependencies = [ "serde", "serde_yaml", "serial_test", - "serial_test_derive", "solana-clap-utils", "solana-cli-config", "solana-client", "solana-core", "solana-local-cluster", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-sdk", "solana-stake-program", @@ -6118,26 +6273,31 @@ dependencies = [ [[package]] name = "solana-stake-o-matic" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "log 0.4.14", + "reqwest", + "semver 0.9.0", + "serde", + "serde_json", "serde_yaml", "solana-clap-utils", "solana-cli-config", "solana-cli-output", "solana-client", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-notifier", "solana-sdk", "solana-stake-program", "solana-transaction-status", + "thiserror", ] [[package]] name = "solana-stake-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "log 0.4.14", @@ -6147,9 +6307,9 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "solana-metrics", "solana-sdk", "solana-vote-program", @@ -6158,9 +6318,9 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.5.19" 
+version = "1.6.14" dependencies = [ - "arc-swap", + "arc-swap 0.4.8", "backoff", "bincode", "bzip2", @@ -6173,6 +6333,7 @@ dependencies = [ "log 0.4.14", "prost", "prost-types", + "rand_core 0.6.3", "serde", "serde_derive", "smpl_jwt", @@ -6186,7 +6347,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "bs58", @@ -6202,11 +6363,11 @@ dependencies = [ [[package]] name = "solana-store-tool" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "log 0.4.14", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-runtime", "solana-sdk", @@ -6215,12 +6376,12 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "1.5.19" +version = "1.6.14" dependencies = [ "libc", "log 0.4.14", "nix 0.19.1", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-measure", "solana-metrics", "solana-perf", @@ -6230,14 +6391,14 @@ dependencies = [ [[package]] name = "solana-sys-tuner" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "libc", "log 0.4.14", "nix 0.19.1", "solana-clap-utils", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-version", "sysctl", "unix_socket2", @@ -6246,7 +6407,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "chrono", @@ -6264,7 +6425,7 @@ dependencies = [ "solana-cli-config", "solana-client", "solana-core", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-program-test", "solana-remote-wallet", "solana-runtime", @@ -6280,7 +6441,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.5.19" +version = "1.6.14" dependencies = [ "Inflector", "base64 0.12.3", @@ -6296,15 +6457,14 @@ dependencies = [ "solana-stake-program", "solana-vote-program", "spl-associated-token-account", - "spl-memo 2.0.1", - "spl-memo 3.0.0", + "spl-memo", "spl-token", "thiserror", ] [[package]] name = 
"solana-upload-perf" -version = "1.5.19" +version = "1.6.14" dependencies = [ "serde_json", "solana-metrics", @@ -6312,21 +6472,21 @@ dependencies = [ [[package]] name = "solana-version" -version = "0.3.6" +version = "1.6.14" dependencies = [ "log 0.4.14", "rustc_version 0.2.3", "serde", "serde_derive", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "solana-sdk", ] [[package]] name = "solana-vest-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "chrono", @@ -6342,7 +6502,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ "bincode", "log 0.4.14", @@ -6351,9 +6511,9 @@ dependencies = [ "rustc_version 0.2.3", "serde", "serde_derive", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", "solana-metrics", "solana-sdk", "thiserror", @@ -6361,7 +6521,7 @@ dependencies = [ [[package]] name = "solana-watchtower" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "humantime", @@ -6370,7 +6530,7 @@ dependencies = [ "solana-cli-config", "solana-cli-output", "solana-client", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-notifier", "solana-sdk", @@ -6379,9 +6539,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e36c51d5aa290416c5dea3c43ac467cb57c0b643184af23e6bdab7434710fb" +checksum = "debbc13545a1d972955a4fd3014e7c9d6d81da16c3626ee5f64bf3aa619548f8" dependencies = [ "byteorder", "combine", @@ -6407,39 +6567,30 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4adc47eebe5d2b662cbaaba1843719c28a67e5ec5d0460bc3ca60900a51f74e2" dependencies = [ - "solana-program 1.6.10", + "solana-program 1.7.3", "spl-token", ] [[package]] name = "spl-memo" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2b771f6146dec14ef5fbf498f9374652c54badc3befc8c40c1d426dd45d720" -dependencies = [ - "solana-program 1.6.10", -] - -[[package]] -name = "spl-memo" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e76b60c6f58279b5469beb1705744e9778ee94d643c8e3e2ff91874c59bb3c63" +checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.6.10", + "solana-program 1.7.3", ] [[package]] name = "spl-token" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b795e50d15dfd35aa5460b80a16414503a322be115a417a43db987c5824c6798" +checksum = "fbfa8fd791aeb4d7ad5fedb7872478de9f4e8b4fcb02dfd9e7f2f9ae3f3ddd73" dependencies = [ "arrayref", "num-derive", "num-traits", "num_enum", - "solana-program 1.6.10", + "solana-program 1.7.3", "thiserror", ] @@ -6488,7 +6639,7 @@ dependencies = [ "quote 1.0.9", "serde", "serde_derive", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6504,7 +6655,7 @@ dependencies = [ "serde_derive", "serde_json", "sha1", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6513,15 +6664,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -dependencies = [ - "bytes 0.4.12", -] - [[package]] name = "strsim" version = "0.8.0" @@ -6550,7 +6692,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.27", "quote 1.0.9", 
- "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6584,9 +6726,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", @@ -6601,7 +6743,7 @@ checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "unicode-xid 0.2.2", ] @@ -6620,9 +6762,9 @@ dependencies = [ [[package]] name = "systemstat" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31c241679f72241744c20d064a4db7feeb2caa214a8d6e2d4243b8c674a29a5" +checksum = "a934f8fe2f893260080fdde71e840b35308f48bf3bd3b261cb24e668c4b48db3" dependencies = [ "bytesize", "chrono", @@ -6641,9 +6783,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.33" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0bcfbd6a598361fda270d82469fff3d65089dc33e175c9a131f7b4cd395f228" +checksum = "7d779dc6aeff029314570f666ec83f19df7280bb36ef338442cfa8c604021b80" dependencies = [ "filetime", "libc", @@ -6652,34 +6794,34 @@ dependencies = [ [[package]] name = "tarpc" -version = "0.23.1" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1035e0e1b7064c1080702a8a5b3d044a3dea10a1096766be6f5c22580096fa75" +checksum = "e325774dd5b35d979e9f4db2b0f0d7d85dc2ff2b676a3150af56c09eafc14b07" dependencies = [ "anyhow", "fnv", "futures 0.3.15", "humantime", "log 0.4.14", - "pin-project 1.0.7", + "pin-project", "rand 0.7.3", "serde", "static_assertions", "tarpc-plugins", - "tokio 0.3.7", + "tokio 1.7.1", "tokio-serde", - 
"tokio-util 0.4.0", + "tokio-util 0.6.7", ] [[package]] name = "tarpc-plugins" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbaf92ceea0a2ab555bea18a47a891e46ba2d6f930ec9506771662f4ab82bb7" +checksum = "f3240378a22b1195734e085ba71d1d4188d50f034aea82635acc430b7005afb5" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6690,8 +6832,8 @@ checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ "cfg-if 1.0.0", "libc", - "rand 0.8.3", - "redox_syscall 0.2.8", + "rand 0.8.4", + "redox_syscall 0.2.9", "remove_dir_all", "winapi 0.3.9", ] @@ -6750,7 +6892,7 @@ checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6772,9 +6914,9 @@ dependencies = [ [[package]] name = "time" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" +checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" dependencies = [ "const_fn", "libc", @@ -6797,15 +6939,15 @@ dependencies = [ [[package]] name = "time-macros-impl" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" dependencies = [ "proc-macro-hack", "proc-macro2 1.0.27", "quote 1.0.9", "standback", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -6824,6 +6966,24 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "tiny-bip39" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +dependencies = [ + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.5", + "thiserror", + "unicode-normalization", + "zeroize", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -6908,37 +7068,24 @@ dependencies = [ [[package]] name = "tokio" -version = "0.3.7" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46409491c9375a693ce7032101970a54f8a2010efb77e13f70788f0d84489e39" +checksum = "5fb2ed024293bb19f7a5dc54fe83bf86532a44c12a2bb8ba40d64a4509395ca2" dependencies = [ "autocfg 1.0.1", - "bytes 0.6.0", - "futures-core", + "bytes 1.0.1", "libc", "memchr", - "mio 0.7.11", + "mio 0.7.13", "num_cpus", "once_cell", "parking_lot 0.11.1", - "pin-project-lite 0.2.6", + "pin-project-lite 0.2.7", "signal-hook-registry", - "slab", - "tokio-macros 0.3.2", + "tokio-macros 1.2.0", "winapi 0.3.9", ] -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -dependencies = [ - "bytes 0.4.12", - "either", - "futures 0.1.31", -] - [[package]] name = "tokio-codec" version = "0.1.2" @@ -7000,18 +7147,28 @@ checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] name = "tokio-macros" -version = "0.3.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46dfffa59fc3c8aad216ed61bdc2c263d2b9d87a9c8ac9de0c11a813e51b6db7" +checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio 1.7.1", ] [[package]] @@ -7035,28 +7192,40 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "futures-core", "rustls", - "tokio 0.2.25", + "tokio 1.7.1", "webpki", ] [[package]] name = "tokio-serde" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebdd897b01021779294eb09bb3b52b6e11b0747f9f7e333a84bef532b656de99" +checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" dependencies = [ "bincode", - "bytes 0.5.6", - "derivative", - "futures 0.3.15", - "pin-project 0.4.28", + "bytes 1.0.1", + "educe", + "futures-core", + "futures-sink", + "pin-project", "serde", + "serde_json", +] + +[[package]] +name = "tokio-stream" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.7", + "tokio 1.7.1", ] [[package]] @@ -7123,16 +7292,6 @@ dependencies = [ "tokio-io", ] -[[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" -dependencies = [ - "native-tls", - "tokio 0.2.25", -] - [[package]] name = "tokio-udp" version = "0.1.6" @@ -7182,16 +7341,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.4.0" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24793699f4665ba0416ed287dc794fe6b11a4aa5e4e95b58624f45f6c46b97d4" 
+checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "futures-core", "futures-sink", "log 0.4.14", - "pin-project-lite 0.1.12", - "tokio 0.3.7", + "pin-project-lite 0.2.7", + "tokio 1.7.1", ] [[package]] @@ -7205,30 +7364,29 @@ dependencies = [ [[package]] name = "tonic" -version = "0.3.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74a5d6e7439ecf910463667080de772a9c7ddf26bc9fb4f3252ac3862e43337d" +checksum = "2ac42cd97ac6bd2339af5bcabf105540e21e45636ec6fa6aae5e85d44db31be0" dependencies = [ "async-stream", "async-trait", - "base64 0.12.3", - "bytes 0.5.6", + "base64 0.13.0", + "bytes 1.0.1", "futures-core", "futures-util", - "http 0.2.4", - "http-body 0.3.1", - "hyper 0.13.10", + "h2 0.3.3", + "http", + "http-body 0.4.2", + "hyper 0.14.9", "percent-encoding 2.1.0", - "pin-project 0.4.28", + "pin-project", "prost", "prost-derive", - "tokio 0.2.25", + "tokio 1.7.1", "tokio-rustls", - "tokio-util 0.3.1", + "tokio-stream", + "tokio-util 0.6.7", "tower", - "tower-balance", - "tower-load", - "tower-make", "tower-service", "tracing", "tracing-futures", @@ -7236,182 +7394,36 @@ dependencies = [ [[package]] name = "tower" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3169017c090b7a28fce80abaad0ab4f5566423677c9331bb320af7e49cfe62" -dependencies = [ - "futures-core", - "tower-buffer", - "tower-discover", - "tower-layer", - "tower-limit", - "tower-load-shed", - "tower-retry", - "tower-service", - "tower-timeout", - "tower-util", -] - -[[package]] -name = "tower-balance" -version = "0.3.0" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a792277613b7052448851efcf98a2c433e6f1d01460832dc60bef676bc275d4c" +checksum = "f60422bc7fefa2f3ec70359b8ff1caff59d785877eb70595904605bcc412470f" dependencies = [ "futures-core", "futures-util", "indexmap", - 
"pin-project 0.4.28", - "rand 0.7.3", + "pin-project", + "rand 0.8.4", "slab", - "tokio 0.2.25", - "tower-discover", + "tokio 1.7.1", + "tokio-stream", + "tokio-util 0.6.7", "tower-layer", - "tower-load", - "tower-make", - "tower-ready-cache", "tower-service", "tracing", ] -[[package]] -name = "tower-buffer" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4887dc2a65d464c8b9b66e0e4d51c2fd6cf5b3373afc72805b0a60bce00446a" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower-discover" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f6b5000c3c54d269cc695dff28136bb33d08cbf1df2c48129e143ab65bf3c2a" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" -[[package]] -name = "tower-limit" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c3040c5dbed68abffaa0d4517ac1a454cd741044f33ab0eefab6b8d1361404" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-load", - "tower-service", -] - -[[package]] -name = "tower-load" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc79fc3afd07492b7966d7efa7c6c50f8ed58d768a6075dd7ae6591c5d2017b" -dependencies = [ - "futures-core", - "log 0.4.14", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-discover", - "tower-service", -] - -[[package]] -name = "tower-load-shed" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f021e23900173dc315feb4b6922510dae3e79c689b74c089112066c11f0ae4e" -dependencies = [ - "futures-core", - 
"pin-project 0.4.28", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-make" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce50370d644a0364bf4877ffd4f76404156a248d104e2cc234cd391ea5cdc965" -dependencies = [ - "tokio 0.2.25", - "tower-service", -] - -[[package]] -name = "tower-ready-cache" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eabb6620e5481267e2ec832c780b31cad0c15dcb14ed825df5076b26b591e1f" -dependencies = [ - "futures-core", - "futures-util", - "indexmap", - "log 0.4.14", - "tokio 0.2.25", - "tower-service", -] - -[[package]] -name = "tower-retry" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6727956aaa2f8957d4d9232b308fe8e4e65d99db30f42b225646e86c9b6a952" -dependencies = [ - "futures-core", - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-service" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" -[[package]] -name = "tower-timeout" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "127b8924b357be938823eaaec0608c482d40add25609481027b96198b2e4b31e" -dependencies = [ - "pin-project 0.4.28", - "tokio 0.2.25", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" -dependencies = [ - "futures-core", - "futures-util", - "pin-project 0.4.28", - "tower-service", -] - [[package]] name = "tracing" version = "0.1.26" @@ -7420,7 +7432,7 @@ checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ "cfg-if 1.0.0", "log 0.4.14", - "pin-project-lite 0.2.6", + 
"pin-project-lite 0.2.7", "tracing-attributes", "tracing-core", ] @@ -7433,7 +7445,7 @@ checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", ] [[package]] @@ -7451,7 +7463,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.7", + "pin-project", "tracing", ] @@ -7479,7 +7491,7 @@ dependencies = [ [[package]] name = "triedb" version = "0.5.0" -source = "git+https://github.com/velas/triedb?branch=chore/bump-rocksdb#eba324ad94551de29145023d58d31a4a86def84e" +source = "git+https://github.com/velas/triedb?branch=chore/bump-rocksdb#ea0083bc880a47f51c11df4cbfebb90ee4988545" dependencies = [ "primitive-types", "rlp", @@ -7513,7 +7525,7 @@ dependencies = [ "base64 0.11.0", "byteorder", "bytes 0.5.6", - "http 0.2.4", + "http", "httparse", "input_buffer", "log 0.4.14", @@ -7583,9 +7595,9 @@ dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] @@ -7695,9 +7707,9 @@ checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b" [[package]] name = "vcpkg" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vec_map" @@ -7718,13 +7730,12 @@ dependencies = [ [[package]] name = "velas-gossip" -version = "1.5.19" +version = "1.6.14" dependencies = [ "clap", "solana-clap-utils", - 
"solana-client", "solana-core", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-net-utils", "solana-sdk", "solana-version", @@ -7732,7 +7743,7 @@ dependencies = [ [[package]] name = "velas-install" -version = "1.5.19" +version = "1.6.14" dependencies = [ "atty", "bincode", @@ -7753,7 +7764,7 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-config-program", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-sdk", "solana-version", "tar", @@ -7763,25 +7774,9 @@ dependencies = [ "winreg", ] -[[package]] -name = "velas-keygen" -version = "1.5.19" -dependencies = [ - "bs58", - "clap", - "dirs-next", - "num_cpus", - "solana-clap-utils", - "solana-cli-config", - "solana-remote-wallet", - "solana-sdk", - "solana-version", - "tiny-bip39", -] - [[package]] name = "velas-validator" -version = "1.5.19" +version = "1.6.14" dependencies = [ "base64 0.12.3", "bincode", @@ -7791,11 +7786,16 @@ dependencies = [ "core_affinity", "fd-lock", "indicatif", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-ipc-server", + "jsonrpc-server-utils", "libc", "log 0.4.14", "num_cpus", "rand 0.7.3", - "serde_json", + "serde", "signal-hook", "solana-clap-utils", "solana-cli-config", @@ -7804,7 +7804,7 @@ dependencies = [ "solana-download-utils", "solana-faucet", "solana-ledger", - "solana-logger 1.5.19", + "solana-logger 1.6.14", "solana-metrics", "solana-net-utils", "solana-perf", @@ -7853,17 +7853,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -dependencies = [ - "futures 0.1.31", - "log 0.4.14", - "try-lock", -] - [[package]] name = "want" version = "0.3.0" @@ -7909,7 +7898,7 @@ dependencies = [ "log 0.4.14", "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "wasm-bindgen-shared", ] @@ -7943,7 +7932,7 @@ checksum = 
"be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7976,9 +7965,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.20.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ "webpki", ] @@ -7998,7 +7987,7 @@ dependencies = [ "tokio-io", "tokio-reactor", "tokio-tcp", - "tokio-tls 0.2.1", + "tokio-tls", "unicase 1.4.2", "url 1.7.2", "websocket-base", @@ -8021,7 +8010,7 @@ dependencies = [ "tokio-codec", "tokio-io", "tokio-tcp", - "tokio-tls 0.2.1", + "tokio-tls", ] [[package]] @@ -8127,7 +8116,7 @@ checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" dependencies = [ "proc-macro2 1.0.27", "quote 1.0.9", - "syn 1.0.72", + "syn 1.0.73", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 167b1152da..d091289ed8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "accounts-cluster-bench", "bench-exchange", "bench-streamer", "bench-tps", @@ -81,4 +82,9 @@ members = [ "watchtower", ] -exclude = ["programs/bpf"] +exclude = [ + "programs/bpf", +] + +[profile.dev] +split-debuginfo = "unpacked" diff --git a/README.md b/README.md index f29e3d00f9..3b04eaa4ed 100644 --- a/README.md +++ b/README.md @@ -81,6 +81,3 @@ $ cargo +nightly bench The release process for this project is described [here](RELEASE.md). -# Disclaimer - -All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. 
Furthermore nothing in this project constitutes a solicitation for investment. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..ee9df8d7b6 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,155 @@ +# Security Policy + +1. [Reporting security problems](#reporting) +4. [Security Bug Bounties](#bounty) +2. [Incident Response Process](#process) + + +## Reporting security problems to Solana + +**DO NOT CREATE AN ISSUE** to report a security problem. Instead, please send an +email to security@solana.com and provide your github username so we can add you +to a new draft security advisory for further discussion. + +Expect a response as fast as possible, within one business day at the latest. + + +## Security Bug Bounties +We offer bounties for critical security issues. Please see below for more details. + +Loss of Funds: +$500,000 USD in locked SOL tokens (locked for 12 months) +* Theft of funds without users signature from any account +* Theft of funds without users interaction in system, token, stake, vote programs +* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes. + +Consensus/Safety Violations: +$250,000 USD in locked SOL tokens (locked for 12 months) +* Consensus safety violation +* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc.. 
+ +Other Attacks: + $100,000 USD in locked SOL tokens (locked for 12 months) +* Protocol liveness attacks, +* Eclipse attacks, +* Remote attacks that partition the network, + +DoS Attacks: +$25,000 USD in locked SOL tokens (locked for 12 months) +* Remote resource exaustion via Non-RPC protocols + +RPC DoS/Crashes: +$5,000 USD in locked SOL tokens (locked for 12 months) +* RPC attacks + +Out of Scope: +The following components are out of scope for the bounty program +* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com +* Explorer: `/explorer` in the monorepo as well as https://explorer.solana.com +* Any encrypted credentials, auth tokens, etc. checked into the repo +* Bugs in dependencies. Please take them upstream! +* Attacks that require social engineering + +Eligibility: +* The participant submitting the bug bounty shall follow the process outlined within this document +* Valid exploits can be eligible even if they are not successfully executed on the cluster +* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis +* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens. + +Notes: +* All locked tokens can be staked during the lockup period + + +## Incident Response Process + +In case an incident is discovered or reported, the following process will be +followed to contain, respond and remediate: + +### 1. Establish a new draft security advisory +In response to an email to security@solana.com, a member of the `solana-labs/admins` group will +1. Create a new draft security advisory for the incident at https://github.com/solana-labs/solana/security/advisories +1. 
Add the reporter's github user and the `solana-labs/security-incident-response` group to the draft security advisory +1. Create a private fork of the repository (grey button towards the bottom of the page) +1. Respond to the reporter by email, sharing a link to the draft security advisory + +### 2. Triage +Within the draft security advisory, discuss and determine the severity of the +issue. If necessary, members of the `solana-labs/security-incident-response` +group may add other github users to the advisory to assist. + +If it is determined that this not a critical network issue then the advisory +should be closed and if more follow-up is required a normal Solana public github +issue should be created. + +### 3. Prepare Fixes +For the affected branches, typically all three (edge, beta and stable), prepare +a fix for the issue and push them to the corresponding branch in the private +repository associated with the draft security advisory. + +There is no CI available in the private repository so you must build from source +and manually verify fixes. + +Code review from the reporter is ideal, as well as from multiple members of the +core development team. + +### 4. Notify Security Group Validators +Once an ETA is available for the fix, a member of the +`solana-labs/security-incident-response` group should notify the validators so +they can prepare for an update using the "Solana Red Alert" notification system. + +The teams are all over the world and it's critical to provide actionable +information at the right time. Don't be the person that wakes everybody up at +2am when a fix won't be available for hours. + +### 5. Ship the patch +Once the fix is accepted, a member of the +`solana-labs/security-incident-response` group should prepare a single patch +file for each affected branch. The commit title for the patch should only +contain the advisory id, and not disclose any further details about the +incident. 
+ +Copy the patches to https://release.solana.com/ under a subdirectory named after +the advisory id (example: +https://release.solana.com/GHSA-hx59-f5g4-jghh/v1.4.patch). Contact a member of +the `solana-labs/admins` group if you require access to release.solana.com + +Using the "Solana Red Alert" channel: +1. Notify validators that there's an issue and a patch will be provided in X minutes +2. If X minutes expires and there's no patch, notify of the delay and provide a + new ETA +3. Provide links to patches of https://release.solana.com/ for each affected branch + +Validators can be expected to build the patch from source against the latest +release for the affected branch. + +Since the software version will not change after the patch is applied, request +that each validator notify in the existing channel once they've updated. Manually +monitor the roll out until a sufficient amount of stake has updated - typically +at least 33.3% or 66.6% depending on the issue. + +### 6. Public Disclosure and Release +Once the fix has been deployed to the security group validators, the patches from the security +advisory may be merged into the main source repository. A new official release +for each affected branch should be shipped and all validators requested to +upgrade as quickly as possible. + +### 7. Security Advisory Bounty Accounting and Cleanup + +If this issue is eligible for a bounty, prefix the title of the security +advisory with one of the following, depending on the severity: +* `[Bounty Category: Critical: Loss of Funds]` +* `[Bounty Category: Critical: Loss of Availability]` +* `[Bounty Category: Critical: DoS]` +* `[Bounty Category: Critical: Other]` +* `[Bounty Category: Non-critical]` +* `[Bounty Category: RPC]` + +Confirm with the reporter that they agree with the severity assessment, and +discuss as required to reach a conclusion. + +We currently do not use the Github workflow to publish security advisories. 
+Once the issue and fix have been disclosed, and a bounty category is assessed if +appropriate, the GitHub security advisory is no longer needed and can be closed. + +Bounties are currently awarded once a quarter (TODO: link to this process, or +inline the workflow) diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index a869f39913..e404c9fd80 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-account-decoder" -version = "1.5.19" +version = "1.6.14" description = "Solana account decoder" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,14 +19,16 @@ bs58 = "0.3.1" bv = "0.11.1" Inflector = "0.11.4" lazy_static = "1.4.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" thiserror = "1.0" zstd = "0.5.1" -solana-config-program = { path = "../programs/config", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } + +solana-config-program = { path = "../programs/config", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] } + velas-account-program = { path = "../programs/velas-account-program" } diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs index 611be374e4..7f1e7c40c7 100644 --- a/account-decoder/src/lib.rs +++ b/account-decoder/src/lib.rs @@ -16,7 +16,10 @@ pub mod validator_info; use { crate::parse_account_data::{parse_account_data, AccountAdditionalData, 
ParsedAccount}, - solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey}, + solana_sdk::{ + account::ReadableAccount, account::WritableAccount, clock::Epoch, + fee_calculator::FeeCalculator, pubkey::Pubkey, + }, std::{ io::{Read, Write}, str::FromStr, @@ -45,7 +48,7 @@ pub enum UiAccountData { Binary(String, UiAccountEncoding), } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)] #[serde(rename_all = "camelCase")] pub enum UiAccountEncoding { Binary, // Legacy. Retained for RPC backwards compatibility @@ -57,58 +60,61 @@ pub enum UiAccountEncoding { } impl UiAccount { - pub fn encode( + pub fn encode( pubkey: &Pubkey, - account: Account, + account: &T, encoding: UiAccountEncoding, additional_data: Option, data_slice_config: Option, ) -> Self { let data = match encoding { UiAccountEncoding::Binary => UiAccountData::LegacyBinary( - bs58::encode(slice_data(&account.data, data_slice_config)).into_string(), + bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), ), UiAccountEncoding::Base58 => UiAccountData::Binary( - bs58::encode(slice_data(&account.data, data_slice_config)).into_string(), + bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), encoding, ), UiAccountEncoding::Base64 => UiAccountData::Binary( - base64::encode(slice_data(&account.data, data_slice_config)), + base64::encode(slice_data(&account.data(), data_slice_config)), encoding, ), UiAccountEncoding::Base64Zstd => { let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); match encoder - .write_all(slice_data(&account.data, data_slice_config)) + .write_all(slice_data(&account.data(), data_slice_config)) .and_then(|()| encoder.finish()) { Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding), Err(_) => UiAccountData::Binary( - base64::encode(slice_data(&account.data, data_slice_config)), + 
base64::encode(slice_data(&account.data(), data_slice_config)), UiAccountEncoding::Base64, ), } } UiAccountEncoding::JsonParsed => { if let Ok(parsed_data) = - parse_account_data(pubkey, &account.owner, &account.data, additional_data) + parse_account_data(pubkey, &account.owner(), &account.data(), additional_data) { UiAccountData::Json(parsed_data) } else { - UiAccountData::Binary(base64::encode(&account.data), UiAccountEncoding::Base64) + UiAccountData::Binary( + base64::encode(&account.data()), + UiAccountEncoding::Base64, + ) } } }; UiAccount { - lamports: account.lamports, + lamports: account.lamports(), data, - owner: account.owner.to_string(), - executable: account.executable, - rent_epoch: account.rent_epoch, + owner: account.owner().to_string(), + executable: account.executable(), + rent_epoch: account.rent_epoch(), } } - pub fn decode(&self) -> Option { + pub fn decode(&self) -> Option { let data = match &self.data { UiAccountData::Json(_) => None, UiAccountData::LegacyBinary(blob) => bs58::decode(blob).into_vec().ok(), @@ -128,13 +134,13 @@ impl UiAccount { UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None, }, }?; - Some(Account { - lamports: self.lamports, + Some(T::create( + self.lamports, data, - owner: Pubkey::from_str(&self.owner).ok()?, - executable: self.executable, - rent_epoch: self.rent_epoch, - }) + Pubkey::from_str(&self.owner).ok()?, + self.executable, + self.rent_epoch, + )) } } @@ -184,6 +190,7 @@ fn slice_data(data: &[u8], data_slice_config: Option) -> &[u8 #[cfg(test)] mod test { use super::*; + use solana_sdk::account::{Account, AccountSharedData}; #[test] fn test_slice_data() { @@ -217,10 +224,10 @@ mod test { fn test_base64_zstd() { let encoded_account = UiAccount::encode( &Pubkey::default(), - Account { + &AccountSharedData::from(Account { data: vec![0; 1024], ..Account::default() - }, + }), UiAccountEncoding::Base64Zstd, None, None, @@ -230,7 +237,9 @@ mod test { UiAccountData::Binary(_, UiAccountEncoding::Base64Zstd) 
)); - let decoded_account = encoded_account.decode().unwrap(); - assert_eq!(decoded_account.data, vec![0; 1024]); + let decoded_account = encoded_account.decode::().unwrap(); + assert_eq!(decoded_account.data(), &vec![0; 1024]); + let decoded_account = encoded_account.decode::().unwrap(); + assert_eq!(decoded_account.data(), &vec![0; 1024]); } } diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs index a0d5623b32..3a9d6151b5 100644 --- a/account-decoder/src/parse_config.rs +++ b/account-decoder/src/parse_config.rs @@ -91,6 +91,7 @@ mod test { use crate::validator_info::ValidatorInfo; use serde_json::json; use solana_config_program::create_config_account; + use solana_sdk::account::ReadableAccount; #[test] fn test_parse_config() { @@ -101,7 +102,7 @@ mod test { let stake_config_account = create_config_account(vec![], &stake_config, 10); assert_eq!( parse_config( - &stake_config_account.data, + &stake_config_account.data(), &solana_stake_program::config::id() ) .unwrap(), @@ -124,7 +125,7 @@ mod test { 10, ); assert_eq!( - parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(), + parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(), ConfigAccountType::ValidatorInfo(UiConfig { keys: vec![ UiConfigKey { diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs index c7f1587c0c..b1bccfc537 100644 --- a/account-decoder/src/parse_token.rs +++ b/account-decoder/src/parse_token.rs @@ -14,23 +14,23 @@ use std::str::FromStr; // A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to // solana_sdk::pubkey::Pubkey pub fn spl_token_id_v2_0() -> Pubkey { - Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap() + Pubkey::new_from_array(spl_token_v2_0::id().to_bytes()) } // A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to // solana_sdk::pubkey::Pubkey pub fn spl_token_v2_0_native_mint() -> Pubkey { - 
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap() + Pubkey::new_from_array(spl_token_v2_0::native_mint::id().to_bytes()) } // A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey { - SplTokenPubkey::from_str(&pubkey.to_string()).unwrap() + SplTokenPubkey::new_from_array(pubkey.to_bytes()) } // A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey { - Pubkey::from_str(&pubkey.to_string()).unwrap() + Pubkey::new_from_array(pubkey.to_bytes()) } pub fn parse_token( diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml index 47eeb976dc..e93d4fb719 100644 --- a/accounts-bench/Cargo.toml +++ b/accounts-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-accounts-bench" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,12 +10,12 @@ publish = false [dependencies] log = "0.4.11" -rayon = "1.4.0" -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +rayon = "1.5.0" +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } rand = "0.7.0" clap = "2.33.1" crossbeam-channel = "0.4" diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 9d49e7853f..e23be3ce71 100644 --- a/accounts-bench/src/main.rs +++ 
b/accounts-bench/src/main.rs @@ -6,10 +6,10 @@ use rayon::prelude::*; use solana_measure::measure::Measure; use solana_runtime::{ accounts::{create_test_accounts, update_accounts_bench, Accounts}, - accounts_index::Ancestors, + accounts_index::{AccountSecondaryIndexes, Ancestors}, }; use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey}; -use std::{collections::HashSet, env, fs, path::PathBuf}; +use std::{env, fs, path::PathBuf}; fn main() { solana_logger::setup(); @@ -58,8 +58,12 @@ fn main() { if fs::remove_dir_all(path.clone()).is_err() { println!("Warning: Couldn't remove {:?}", path); } - let accounts = - Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false); + let accounts = Accounts::new_with_config( + vec![path], + &ClusterType::Testnet, + AccountSecondaryIndexes::default(), + false, + ); println!("Creating {} accounts", num_accounts); let mut create_time = Measure::start("create accounts"); let pubkeys: Vec<_> = (0..num_slots) diff --git a/accounts-cluster-bench/.gitignore b/accounts-cluster-bench/.gitignore new file mode 100644 index 0000000000..b645148aa9 --- /dev/null +++ b/accounts-cluster-bench/.gitignore @@ -0,0 +1 @@ +/farf/ diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml new file mode 100644 index 0000000000..8e04c6ba37 --- /dev/null +++ b/accounts-cluster-bench/Cargo.toml @@ -0,0 +1,34 @@ +[package] +authors = ["Solana Maintainers "] +edition = "2018" +name = "solana-accounts-cluster-bench" +version = "1.6.14" +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +publish = false + +[dependencies] +clap = "2.33.1" +log = "0.4.11" +rand = "0.7.0" +rayon = "1.4.1" +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-core = { path = "../core", version = 
"=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] } + +[dev-dependencies] +solana-local-cluster = { path = "../local-cluster", version = "=1.6.14" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs new file mode 100644 index 0000000000..5896843f94 --- /dev/null +++ b/accounts-cluster-bench/src/main.rs @@ -0,0 +1,742 @@ +#![allow(clippy::integer_arithmetic)] +use clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg}; +use log::*; +use rand::{thread_rng, Rng}; +use rayon::prelude::*; +use solana_account_decoder::parse_token::spl_token_v2_0_pubkey; +use solana_clap_utils::input_parsers::pubkey_of; +use solana_client::rpc_client::RpcClient; +use solana_core::gossip_service::discover; +use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT}; +use solana_measure::measure::Measure; +use solana_runtime::inline_spl_token_v2_0; +use solana_sdk::{ + commitment_config::CommitmentConfig, + message::Message, + pubkey::Pubkey, + rpc_port::DEFAULT_RPC_PORT, + signature::{read_keypair_file, Keypair, Signature, Signer}, + system_instruction, system_program, + timing::timestamp, + transaction::Transaction, +}; +use solana_transaction_status::parse_token::spl_token_v2_0_instruction; +use std::{ + net::SocketAddr, + process::exit, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + 
Arc, RwLock, + }, + thread::{sleep, Builder, JoinHandle}, + time::{Duration, Instant}, +}; + +// Create and close messages both require 2 signatures; if transaction construction changes, update +// this magic number +const NUM_SIGNATURES: u64 = 2; + +pub fn airdrop_lamports( + client: &RpcClient, + faucet_addr: &SocketAddr, + id: &Keypair, + desired_balance: u64, +) -> bool { + let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0); + info!("starting balance {}", starting_balance); + + if starting_balance < desired_balance { + let airdrop_amount = desired_balance - starting_balance; + info!( + "Airdropping {:?} lamports from {} for {}", + airdrop_amount, + faucet_addr, + id.pubkey(), + ); + + let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap(); + match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { + Ok(transaction) => { + let mut tries = 0; + loop { + tries += 1; + let result = client.send_and_confirm_transaction(&transaction); + + if result.is_ok() { + break; + } + if tries >= 5 { + panic!( + "Error requesting airdrop: to addr: {:?} amount: {} {:?}", + faucet_addr, airdrop_amount, result + ) + } + } + } + Err(err) => { + panic!( + "Error requesting airdrop: {:?} to addr: {:?} amount: {}", + err, faucet_addr, airdrop_amount + ); + } + }; + + let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| { + panic!("airdrop error {}", e); + }); + info!("current balance {}...", current_balance); + + if current_balance - starting_balance != airdrop_amount { + info!( + "Airdrop failed? 
{} {} {} {}", + id.pubkey(), + current_balance, + starting_balance, + airdrop_amount, + ); + } + } + true +} + +// signature, timestamp, id +type PendingQueue = Vec<(Signature, u64, u64)>; + +struct TransactionExecutor { + sig_clear_t: JoinHandle<()>, + sigs: Arc>, + cleared: Arc>>, + exit: Arc, + counter: AtomicU64, + client: RpcClient, +} + +impl TransactionExecutor { + fn new(entrypoint_addr: SocketAddr) -> Self { + let sigs = Arc::new(RwLock::new(Vec::new())); + let cleared = Arc::new(RwLock::new(Vec::new())); + let exit = Arc::new(AtomicBool::new(false)); + let sig_clear_t = Self::start_sig_clear_thread(&exit, &sigs, &cleared, entrypoint_addr); + let client = + RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed()); + Self { + sigs, + cleared, + sig_clear_t, + exit, + counter: AtomicU64::new(0), + client, + } + } + + fn num_outstanding(&self) -> usize { + self.sigs.read().unwrap().len() + } + + fn push_transactions(&self, txs: Vec) -> Vec { + let mut ids = vec![]; + let new_sigs = txs.into_iter().filter_map(|tx| { + let id = self.counter.fetch_add(1, Ordering::Relaxed); + ids.push(id); + match self.client.send_transaction(&tx) { + Ok(sig) => { + return Some((sig, timestamp(), id)); + } + Err(e) => { + info!("error: {:#?}", e); + } + } + None + }); + let mut sigs_w = self.sigs.write().unwrap(); + sigs_w.extend(new_sigs); + ids + } + + fn drain_cleared(&self) -> Vec { + std::mem::take(&mut *self.cleared.write().unwrap()) + } + + fn close(self) { + self.exit.store(true, Ordering::Relaxed); + self.sig_clear_t.join().unwrap(); + } + + fn start_sig_clear_thread( + exit: &Arc, + sigs: &Arc>, + cleared: &Arc>>, + entrypoint_addr: SocketAddr, + ) -> JoinHandle<()> { + let sigs = sigs.clone(); + let exit = exit.clone(); + let cleared = cleared.clone(); + Builder::new() + .name("sig_clear".to_string()) + .spawn(move || { + let client = RpcClient::new_socket_with_commitment( + entrypoint_addr, + CommitmentConfig::confirmed(), + ); + let mut 
success = 0; + let mut error_count = 0; + let mut timed_out = 0; + let mut last_log = Instant::now(); + while !exit.load(Ordering::Relaxed) { + let sigs_len = sigs.read().unwrap().len(); + if sigs_len > 0 { + let mut sigs_w = sigs.write().unwrap(); + let mut start = Measure::start("sig_status"); + let statuses: Vec<_> = sigs_w + .chunks(200) + .flat_map(|sig_chunk| { + let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect(); + client + .get_signature_statuses(&only_sigs) + .expect("status fail") + .value + }) + .collect(); + let mut num_cleared = 0; + let start_len = sigs_w.len(); + let now = timestamp(); + let mut new_ids = vec![]; + let mut i = 0; + let mut j = 0; + while i != sigs_w.len() { + let mut retain = true; + let sent_ts = sigs_w[i].1; + if let Some(e) = &statuses[j] { + debug!("error: {:?}", e); + if e.status.is_ok() { + success += 1; + } else { + error_count += 1; + } + num_cleared += 1; + retain = false; + } else if now - sent_ts > 30_000 { + retain = false; + timed_out += 1; + } + if !retain { + new_ids.push(sigs_w.remove(i).2); + } else { + i += 1; + } + j += 1; + } + let final_sigs_len = sigs_w.len(); + drop(sigs_w); + cleared.write().unwrap().extend(new_ids); + start.stop(); + debug!( + "sigs len: {:?} success: {} took: {}ms cleared: {}/{}", + final_sigs_len, + success, + start.as_ms(), + num_cleared, + start_len, + ); + if last_log.elapsed().as_millis() > 5000 { + info!( + "success: {} error: {} timed_out: {}", + success, error_count, timed_out, + ); + last_log = Instant::now(); + } + } + sleep(Duration::from_millis(200)); + } + }) + .unwrap() + } +} + +struct SeedTracker { + max_created: Arc, + max_closed: Arc, +} + +fn make_create_message( + keypair: &Keypair, + base_keypair: &Keypair, + max_created_seed: Arc, + num_instructions: usize, + balance: u64, + maybe_space: Option, + mint: Option, +) -> Message { + let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000)); + + let instructions: Vec<_> = (0..num_instructions) + 
.into_iter() + .map(|_| { + let program_id = if mint.is_some() { + inline_spl_token_v2_0::id() + } else { + system_program::id() + }; + let seed = max_created_seed.fetch_add(1, Ordering::Relaxed).to_string(); + let to_pubkey = + Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap(); + let mut instructions = vec![system_instruction::create_account_with_seed( + &keypair.pubkey(), + &to_pubkey, + &base_keypair.pubkey(), + &seed, + balance, + space, + &program_id, + )]; + if let Some(mint_address) = mint { + instructions.push(spl_token_v2_0_instruction( + spl_token_v2_0::instruction::initialize_account( + &spl_token_v2_0::id(), + &spl_token_v2_0_pubkey(&to_pubkey), + &spl_token_v2_0_pubkey(&mint_address), + &spl_token_v2_0_pubkey(&base_keypair.pubkey()), + ) + .unwrap(), + )); + } + + instructions + }) + .collect(); + let instructions: Vec<_> = instructions.into_iter().flatten().collect(); + + Message::new(&instructions, Some(&keypair.pubkey())) +} + +fn make_close_message( + keypair: &Keypair, + base_keypair: &Keypair, + max_closed_seed: Arc, + num_instructions: usize, + balance: u64, + spl_token: bool, +) -> Message { + let instructions: Vec<_> = (0..num_instructions) + .into_iter() + .map(|_| { + let program_id = if spl_token { + inline_spl_token_v2_0::id() + } else { + system_program::id() + }; + let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string(); + let address = + Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap(); + if spl_token { + spl_token_v2_0_instruction( + spl_token_v2_0::instruction::close_account( + &spl_token_v2_0::id(), + &spl_token_v2_0_pubkey(&address), + &spl_token_v2_0_pubkey(&keypair.pubkey()), + &spl_token_v2_0_pubkey(&base_keypair.pubkey()), + &[], + ) + .unwrap(), + ) + } else { + system_instruction::transfer_with_seed( + &address, + &base_keypair.pubkey(), + seed, + &program_id, + &keypair.pubkey(), + balance, + ) + } + }) + .collect(); + + Message::new(&instructions, 
Some(&keypair.pubkey())) +} + +#[allow(clippy::too_many_arguments)] +fn run_accounts_bench( + entrypoint_addr: SocketAddr, + faucet_addr: SocketAddr, + payer_keypairs: &[&Keypair], + iterations: usize, + maybe_space: Option, + batch_size: usize, + close_nth: u64, + maybe_lamports: Option, + num_instructions: usize, + mint: Option, +) { + assert!(num_instructions > 0); + let client = + RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed()); + + info!("Targeting {}", entrypoint_addr); + + let mut last_blockhash = Instant::now(); + let mut last_log = Instant::now(); + let mut count = 0; + let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash"); + let mut tx_sent_count = 0; + let mut total_accounts_created = 0; + let mut total_accounts_closed = 0; + let mut balances: Vec<_> = payer_keypairs + .iter() + .map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0)) + .collect(); + let mut last_balance = Instant::now(); + + let default_max_lamports = 1000; + let min_balance = maybe_lamports.unwrap_or_else(|| { + let space = maybe_space.unwrap_or(default_max_lamports); + client + .get_minimum_balance_for_rent_exemption(space as usize) + .expect("min balance") + }); + + let base_keypair = Keypair::new(); + let seed_tracker = SeedTracker { + max_created: Arc::new(AtomicU64::default()), + max_closed: Arc::new(AtomicU64::default()), + }; + + info!("Starting balance(s): {:?}", balances); + + let executor = TransactionExecutor::new(entrypoint_addr); + + loop { + if last_blockhash.elapsed().as_millis() > 10_000 { + recent_blockhash = client.get_recent_blockhash().expect("blockhash"); + last_blockhash = Instant::now(); + } + + let fee = recent_blockhash + .1 + .lamports_per_signature + .saturating_mul(NUM_SIGNATURES); + let lamports = min_balance + fee; + + for (i, balance) in balances.iter_mut().enumerate() { + if *balance < lamports || last_balance.elapsed().as_millis() > 2000 { + if let Ok(b) = 
client.get_balance(&payer_keypairs[i].pubkey()) { + *balance = b; + } + last_balance = Instant::now(); + if *balance < lamports * 2 { + info!( + "Balance {} is less than needed: {}, doing aidrop...", + balance, lamports + ); + if !airdrop_lamports( + &client, + &faucet_addr, + &payer_keypairs[i], + lamports * 100_000, + ) { + warn!("failed airdrop, exiting"); + return; + } + } + } + } + + let sigs_len = executor.num_outstanding(); + if sigs_len < batch_size { + let num_to_create = batch_size - sigs_len; + if num_to_create >= payer_keypairs.len() { + info!("creating {} new", num_to_create); + let chunk_size = num_to_create / payer_keypairs.len(); + if chunk_size > 0 { + for (i, keypair) in payer_keypairs.iter().enumerate() { + let txs: Vec<_> = (0..chunk_size) + .into_par_iter() + .map(|_| { + let message = make_create_message( + keypair, + &base_keypair, + seed_tracker.max_created.clone(), + num_instructions, + min_balance, + maybe_space, + mint, + ); + let signers: Vec<&Keypair> = vec![keypair, &base_keypair]; + Transaction::new(&signers, message, recent_blockhash.0) + }) + .collect(); + balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64); + info!("txs: {}", txs.len()); + let new_ids = executor.push_transactions(txs); + info!("ids: {}", new_ids.len()); + tx_sent_count += new_ids.len(); + total_accounts_created += num_instructions * new_ids.len(); + } + } + } + + if close_nth > 0 { + let expected_closed = total_accounts_created as u64 / close_nth; + if expected_closed > total_accounts_closed { + let txs: Vec<_> = (0..expected_closed - total_accounts_closed) + .into_par_iter() + .map(|_| { + let message = make_close_message( + &payer_keypairs[0], + &base_keypair, + seed_tracker.max_closed.clone(), + 1, + min_balance, + mint.is_some(), + ); + let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair]; + Transaction::new(&signers, message, recent_blockhash.0) + }) + .collect(); + balances[0] = balances[0].saturating_sub(fee * txs.len() as 
u64); + info!("close txs: {}", txs.len()); + let new_ids = executor.push_transactions(txs); + info!("close ids: {}", new_ids.len()); + tx_sent_count += new_ids.len(); + total_accounts_closed += new_ids.len() as u64; + } + } + } else { + let _ = executor.drain_cleared(); + } + + count += 1; + if last_log.elapsed().as_millis() > 3000 { + info!( + "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", + total_accounts_created, total_accounts_closed, tx_sent_count, count, balances + ); + last_log = Instant::now(); + } + if iterations != 0 && count >= iterations { + break; + } + if executor.num_outstanding() >= batch_size { + sleep(Duration::from_millis(500)); + } + } + executor.close(); +} + +fn main() { + solana_logger::setup_with_default("solana=info"); + let matches = App::new(crate_name!()) + .about(crate_description!()) + .version(solana_version::version!()) + .arg( + Arg::with_name("entrypoint") + .long("entrypoint") + .takes_value(true) + .value_name("HOST:PORT") + .help("RPC entrypoint address. Usually :8899"), + ) + .arg( + Arg::with_name("faucet_addr") + .long("faucet") + .takes_value(true) + .value_name("HOST:PORT") + .help("Faucet entrypoint address. 
Usually :9900"), + ) + .arg( + Arg::with_name("space") + .long("space") + .takes_value(true) + .value_name("BYTES") + .help("Size of accounts to create"), + ) + .arg( + Arg::with_name("lamports") + .long("lamports") + .takes_value(true) + .value_name("LAMPORTS") + .help("How many lamports to fund each account"), + ) + .arg( + Arg::with_name("identity") + .long("identity") + .takes_value(true) + .multiple(true) + .value_name("FILE") + .help("keypair file"), + ) + .arg( + Arg::with_name("batch_size") + .long("batch-size") + .takes_value(true) + .value_name("BYTES") + .help("Number of transactions to send per batch"), + ) + .arg( + Arg::with_name("close_nth") + .long("close-frequency") + .takes_value(true) + .value_name("BYTES") + .help( + "Send close transactions after this many accounts created. \ + Note: a `close-frequency` value near or below `batch-size` \ + may result in transaction-simulation errors, as the close \ + transactions will be submitted before the corresponding \ + create transactions have been confirmed", + ), + ) + .arg( + Arg::with_name("num_instructions") + .long("num-instructions") + .takes_value(true) + .value_name("NUM") + .help("Number of accounts to create on each transaction"), + ) + .arg( + Arg::with_name("iterations") + .long("iterations") + .takes_value(true) + .value_name("NUM") + .help("Number of iterations to make"), + ) + .arg( + Arg::with_name("check_gossip") + .long("check-gossip") + .help("Just use entrypoint address directly"), + ) + .arg( + Arg::with_name("mint") + .long("mint") + .takes_value(true) + .help("Mint address to initialize account"), + ) + .get_matches(); + + let skip_gossip = !matches.is_present("check_gossip"); + + let port = if skip_gossip { DEFAULT_RPC_PORT } else { 8001 }; + let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], port)); + if let Some(addr) = matches.value_of("entrypoint") { + entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| { + eprintln!("failed to parse 
entrypoint address: {}", e); + exit(1) + }); + } + let mut faucet_addr = SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)); + if let Some(addr) = matches.value_of("faucet_addr") { + faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| { + eprintln!("failed to parse entrypoint address: {}", e); + exit(1) + }); + } + + let space = value_t!(matches, "space", u64).ok(); + let lamports = value_t!(matches, "lamports", u64).ok(); + let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4); + let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0); + let iterations = value_t!(matches, "iterations", usize).unwrap_or(10); + let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1); + if num_instructions == 0 || num_instructions > 500 { + eprintln!("bad num_instructions: {}", num_instructions); + exit(1); + } + + let mint = pubkey_of(&matches, "mint"); + + let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String) + .iter() + .map(|keypair_string| { + read_keypair_file(keypair_string) + .unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string)) + }) + .collect(); + let mut payer_keypair_refs: Vec<&Keypair> = vec![]; + for keypair in payer_keypairs.iter() { + payer_keypair_refs.push(keypair); + } + + let rpc_addr = if !skip_gossip { + info!("Finding cluster entry: {:?}", entrypoint_addr); + let (gossip_nodes, _validators) = discover( + None, // keypair + Some(&entrypoint_addr), + None, // num_nodes + Duration::from_secs(60), // timeout + None, // find_node_by_pubkey + Some(&entrypoint_addr), // find_node_by_gossip_addr + None, // my_gossip_addr + 0, // my_shred_version + ) + .unwrap_or_else(|err| { + eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err); + exit(1); + }); + + info!("done found {} nodes", gossip_nodes.len()); + gossip_nodes[0].rpc + } else { + info!("Using {:?} as the RPC address", entrypoint_addr); + entrypoint_addr + }; + + run_accounts_bench( + rpc_addr, + 
faucet_addr, + &payer_keypair_refs, + iterations, + space, + batch_size, + close_nth, + lamports, + num_instructions, + mint, + ); +} + +#[cfg(test)] +pub mod test { + use super::*; + use solana_core::validator::ValidatorConfig; + use solana_local_cluster::{ + local_cluster::{ClusterConfig, LocalCluster}, + validator_configs::make_identical_validator_configs, + }; + use solana_sdk::poh_config::PohConfig; + + #[test] + fn test_accounts_cluster_bench() { + solana_logger::setup(); + let validator_config = ValidatorConfig::default(); + let num_nodes = 1; + let mut config = ClusterConfig { + cluster_lamports: 10_000_000, + poh_config: PohConfig::new_sleep(Duration::from_millis(50)), + node_stakes: vec![100; num_nodes], + validator_configs: make_identical_validator_configs(&validator_config, num_nodes), + ..ClusterConfig::default() + }; + + let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900)); + let cluster = LocalCluster::new(&mut config); + let iterations = 10; + let maybe_space = None; + let batch_size = 100; + let close_nth = 100; + let maybe_lamports = None; + let num_instructions = 2; + let mut start = Measure::start("total accounts run"); + run_accounts_bench( + cluster.entry_point_info.rpc, + faucet_addr, + &[&cluster.funding_keypair], + iterations, + maybe_space, + batch_size, + close_nth, + maybe_lamports, + num_instructions, + None, + ); + start.stop(); + info!("{}", start); + } +} diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index ae8bbfaa15..96b57f5855 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-banking-bench" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,17 +13,17 @@ clap = "2.33.1" crossbeam-channel = "0.4" log = "0.4.11" rand = "0.7.0" -rayon = "1.4.0" -solana-core = { path = "../core", version = "=1.5.19" } 
-solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-streamer = { path = "../streamer", version = "=1.5.19" } -solana-perf = { path = "../perf", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +rayon = "1.5.0" +solana-core = { path = "../core", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-streamer = { path = "../streamer", version = "=1.6.14" } +solana-perf = { path = "../perf", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml index 89441a9a0e..ce99abbe64 100644 --- a/banks-client/Cargo.toml +++ b/banks-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-client" -version = "1.5.19" +version = "1.6.14" description = "Solana banks client" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,16 +15,16 @@ borsh = "0.8.1" borsh-derive = "0.8.1" futures = "0.3" mio = "0.7.6" -solana-banks-interface = { path = "../banks-interface", version = "=1.5.19" } -solana-program = { path = "../sdk/program", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -tarpc = { version = "0.23.0", features = ["full"] } -tokio = { version = "0.3.5", features = 
["full"] } -tokio-serde = { version = "0.6", features = ["bincode"] } +solana-banks-interface = { path = "../banks-interface", version = "=1.6.14" } +solana-program = { path = "../sdk/program", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +tarpc = { version = "0.24.1", features = ["full"] } +tokio = { version = "1", features = ["full"] } +tokio-serde = { version = "0.8", features = ["bincode"] } [dev-dependencies] -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-banks-server = { path = "../banks-server", version = "=1.5.19" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-banks-server = { path = "../banks-server", version = "=1.6.14" } [lib] crate-type = ["lib"] diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 7aa7f42c04..5a4d36580e 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -129,7 +129,7 @@ impl BanksClient { self.get_account(sysvar::rent::id()).map(|result| { let rent_sysvar = result? 
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?; - from_account::(&rent_sysvar).ok_or_else(|| { + from_account::(&rent_sysvar).ok_or_else(|| { io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar") }) }) diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 1451e69833..ef33e3b95e 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-interface" -version = "1.5.19" +version = "1.6.14" description = "Solana banks RPC interface" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] mio = "0.7.6" -serde = { version = "1.0.118", features = ["derive"] } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -tarpc = { version = "0.23.0", features = ["full"] } +serde = { version = "1.0.122", features = ["derive"] } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +tarpc = { version = "0.24.1", features = ["full"] } [dev-dependencies] -tokio = { version = "0.3.5", features = ["full"] } +tokio = { version = "1", features = ["full"] } [lib] crate-type = ["lib"] diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml index ee7f4b2180..df16a93b20 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-server" -version = "1.5.19" +version = "1.6.14" description = "Solana banks server" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,13 +14,14 @@ bincode = "1.3.1" futures = "0.3" log = "0.4.11" mio = "0.7.6" -solana-banks-interface = { path = "../banks-interface", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -tarpc = { version = "0.23.0", features = ["full"] } -tokio = { version = "0.3", 
features = ["full"] } -tokio-serde = { version = "0.6", features = ["bincode"] } +solana-banks-interface = { path = "../banks-interface", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +tarpc = { version = "0.24.1", features = ["full"] } +tokio = { version = "1", features = ["full"] } +tokio-serde = { version = "0.8", features = ["bincode"] } +tokio-stream = "0.1" [lib] crate-type = ["lib"] diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index a8dbb6f53c..1f1a303ac2 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -242,7 +242,7 @@ impl Banks for BanksServer { commitment: CommitmentLevel, ) -> Option { let bank = self.bank(commitment); - bank.get_account(&address) + bank.get_account(&address).map(Account::from) } } diff --git a/banks-server/src/rpc_banks_service.rs b/banks-server/src/rpc_banks_service.rs index 541133e64a..75e5e13ba6 100644 --- a/banks-server/src/rpc_banks_service.rs +++ b/banks-server/src/rpc_banks_service.rs @@ -15,6 +15,7 @@ use tokio::{ runtime::Runtime, time::{self, Duration}, }; +use tokio_stream::wrappers::IntervalStream; pub struct RpcBanksService { thread_hdl: JoinHandle<()>, @@ -35,7 +36,7 @@ async fn start_abortable_tcp_server( block_commitment_cache.clone(), ) .fuse(); - let interval = time::interval(Duration::from_millis(100)).fuse(); + let interval = IntervalStream::new(time::interval(Duration::from_millis(100))).fuse(); pin_mut!(server, interval); loop { select! 
{ diff --git a/bench-exchange/Cargo.toml b/bench-exchange/Cargo.toml index c9860f9734..12e927f1ec 100644 --- a/bench-exchange/Cargo.toml +++ b/bench-exchange/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-bench-exchange" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,24 +15,24 @@ log = "0.4.11" num-derive = "0.3" num-traits = "0.2" rand = "0.7.0" -rayon = "1.4.0" +rayon = "1.5.0" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-genesis = { path = "../genesis", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-exchange-program = { path = "../programs/exchange", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-genesis = { path = "../genesis", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-exchange-program = { path = "../programs/exchange", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = 
"../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [dev-dependencies] -solana-local-cluster = { path = "../local-cluster", version = "=1.5.19" } +solana-local-cluster = { path = "../local-cluster", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-exchange/tests/bench_exchange.rs b/bench-exchange/tests/bench_exchange.rs index bb032c2bd1..a41b644501 100644 --- a/bench-exchange/tests/bench_exchange.rs +++ b/bench-exchange/tests/bench_exchange.rs @@ -1,19 +1,23 @@ use log::*; use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config}; -use solana_core::gossip_service::{discover_cluster, get_multi_client}; -use solana_core::validator::ValidatorConfig; -use solana_exchange_program::exchange_processor::process_instruction; -use solana_exchange_program::id; -use solana_exchange_program::solana_exchange_program; +use solana_core::{ + gossip_service::{discover_cluster, get_multi_client}, + validator::ValidatorConfig, +}; +use solana_exchange_program::{ + exchange_processor::process_instruction, id, solana_exchange_program, +}; use solana_faucet::faucet::run_local_faucet_with_port; -use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; -use solana_runtime::bank::Bank; -use solana_runtime::bank_client::BankClient; -use solana_sdk::genesis_config::create_genesis_config; -use solana_sdk::signature::{Keypair, Signer}; -use std::process::exit; -use std::sync::mpsc::channel; -use std::time::Duration; +use solana_local_cluster::{ + local_cluster::{ClusterConfig, LocalCluster}, + validator_configs::make_identical_validator_configs, +}; +use solana_runtime::{bank::Bank, bank_client::BankClient}; +use solana_sdk::{ + genesis_config::create_genesis_config, + signature::{Keypair, Signer}, +}; +use std::{process::exit, sync::mpsc::channel, time::Duration}; #[test] #[ignore] @@ -44,7 +48,7 @@ fn test_exchange_local_cluster() { let cluster = 
LocalCluster::new(&mut ClusterConfig { node_stakes: vec![100_000; NUM_NODES], cluster_lamports: 100_000_000_000_000, - validator_configs: vec![ValidatorConfig::default(); NUM_NODES], + validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES), native_instruction_processors: [solana_exchange_program!()].to_vec(), ..ClusterConfig::default() }); diff --git a/bench-streamer/Cargo.toml b/bench-streamer/Cargo.toml index d0f83dce82..8bd7442584 100644 --- a/bench-streamer/Cargo.toml +++ b/bench-streamer/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-bench-streamer" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-streamer = { path = "../streamer", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-streamer = { path = "../streamer", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index c35fd8ede3..b5d522cfc0 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-bench-tps" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -12,26 +12,25 @@ publish = false bincode = "1.3.1" 
clap = "2.33.1" log = "0.4.11" -rayon = "1.4.0" +rayon = "1.5.0" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-genesis = { path = "../genesis", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-genesis = { path = "../genesis", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [dev-dependencies] serial_test = "0.4.0" -serial_test_derive = "0.4.0" -solana-local-cluster = { path = "../local-cluster", version = "=1.5.19" } +solana-local-cluster = { path = "../local-cluster", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index ba0350dec2..084b81ddec 100644 --- a/bench-tps/src/bench.rs +++ 
b/bench-tps/src/bench.rs @@ -8,7 +8,7 @@ use solana_measure::measure::Measure; use solana_metrics::{self, datapoint_info}; use solana_sdk::{ client::Client, - clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, + clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE}, commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash, @@ -32,8 +32,7 @@ use std::{ }; // The point at which transactions become "too old", in seconds. -const MAX_TX_QUEUE_AGE: u64 = - MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND; +const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) as u64; pub const MAX_SPENDS_PER_TX: u64 = 4; diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index f0713623e1..a49c68a7b4 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -1,15 +1,21 @@ #![allow(clippy::integer_arithmetic)] -use serial_test_derive::serial; -use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs}; -use solana_bench_tps::cli::Config; +use serial_test::serial; +use solana_bench_tps::{ + bench::{do_bench_tps, generate_and_fund_keypairs}, + cli::Config, +}; use solana_client::thin_client::create_client; -use solana_core::cluster_info::VALIDATOR_PORT_RANGE; -use solana_core::validator::ValidatorConfig; +use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig}; use solana_faucet::faucet::run_local_faucet_with_port; -use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; +use solana_local_cluster::{ + local_cluster::{ClusterConfig, LocalCluster}, + validator_configs::make_identical_validator_configs, +}; use solana_sdk::signature::{Keypair, Signer}; -use std::sync::{mpsc::channel, Arc}; -use std::time::Duration; +use std::{ + sync::{mpsc::channel, Arc}, + time::Duration, +}; fn test_bench_tps_local_cluster(config: Config) { let native_instruction_processors = vec![]; @@ -19,7 +25,7 @@ fn 
test_bench_tps_local_cluster(config: Config) { let cluster = LocalCluster::new(&mut ClusterConfig { node_stakes: vec![999_990; NUM_NODES], cluster_lamports: 200_000_000, - validator_configs: vec![ValidatorConfig::default(); NUM_NODES], + validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES), native_instruction_processors, ..ClusterConfig::default() }); diff --git a/ci/do-audit.sh b/ci/do-audit.sh index 37ed86d29c..b6c1b86be1 100755 --- a/ci/do-audit.sh +++ b/ci/do-audit.sh @@ -33,12 +33,10 @@ cargo_audit_ignores=( # Blocked on predicates v1.0.6 removing its dependency on `difference` --ignore RUSTSEC-2020-0095 - # hyper is upgraded on master/v1.6 but not for v1.5 - --ignore RUSTSEC-2021-0020 - # generic-array: arr! macro erases lifetimes # - # ed25519-dalek and libsecp256k1 not upgraded for v1.5 + # Blocked on libsecp256k1 releasing with upgraded dependencies + # https://github.com/paritytech/libsecp256k1/issues/66 --ignore RUSTSEC-2020-0146 ) diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index d463de2db4..4cf08b7cba 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.49.0 +FROM solanalabs/rust:1.51.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 26aafdabed..bad2bd7150 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.49.0 +FROM rust:1.51.0 # Add Google Protocol Buffers for Libra's metrics library. 
ENV PROTOC_VERSION 3.8.0 diff --git a/ci/localnet-sanity.sh b/ci/localnet-sanity.sh index f94c9e1878..0899e1e2bc 100755 --- a/ci/localnet-sanity.sh +++ b/ci/localnet-sanity.sh @@ -78,7 +78,6 @@ nodes=( --init-complete-file init-complete-node0.log \ --dynamic-port-range 8000-8050" "multinode-demo/validator.sh \ - --enable-rpc-exit \ --no-restart \ --dynamic-port-range 8050-8100 \ --init-complete-file init-complete-node1.log \ @@ -201,17 +200,10 @@ killNodes() { [[ ${#pids[@]} -gt 0 ]] || return # Try to use the RPC exit API to cleanly exit the first two nodes - # (dynamic nodes, -x, are just killed since their RPC port is not known) + # (dynamic nodes, -x, are just killed) echo "--- RPC exit" - for port in 8899 18899; do - ( - set -x - curl --retry 5 --retry-delay 2 --retry-connrefused \ - -X POST -H 'Content-Type: application/json' \ - -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' \ - http://localhost:$port - ) - done + $solana_validator --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator exit --force || true + $solana_validator --ledger "$SOLANA_CONFIG_DIR"/validator exit --force || true # Give the nodes a splash of time to cleanly exit before killing them sleep 2 diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 36f423c53e..5e0390a60c 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -22,10 +22,11 @@ done snapshot_slot=1 # wait a bit longer than snapshot_slot -while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -le $((snapshot_slot + 1)) ]]; do +while [[ $($solana_cli --url http://localhost:8899 slot --commitment processed) -le $((snapshot_slot + 1)) ]]; do sleep 1 done -curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899 + +$solana_validator --ledger config/ledger exit --force || true wait $pid diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 7dc982fae6..b8719f2800 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 
+18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.49.0 + stable_version=1.51.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2021-01-23 + nightly_version=2021-04-18 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 9d60586e8a..03ddeea0c4 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -79,7 +79,6 @@ _ ci/do-audit.sh cd "$project" _ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc _ "$cargo" stable fmt -- --check - _ "$cargo" nightly test ) done } diff --git a/ci/test-stable.sh b/ci/test-stable.sh index ec3feca39a..1d7cbc1bce 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -25,9 +25,6 @@ source scripts/ulimit-n.sh test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete test -d target/release/bpf && find target/release/bpf -name '*.d' -delete -# Clear the BPF sysroot files, they are not automatically rebuilt -rm -rf target/xargo # Issue #3105 - # Limit compiler jobs to reduce memory usage # on machines with 2gb/thread of memory NPROC=$(nproc) @@ -46,7 +43,11 @@ test-stable-perf) # BPF solana-sdk legacy compile test ./cargo-build-bpf --manifest-path sdk/Cargo.toml - # BPF program tests + # BPF Program unit tests + "$cargo" stable test --manifest-path programs/bpf/Cargo.toml + cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf + + # BPF program system tests _ make -C programs/bpf/c tests _ "$cargo" stable test \ --manifest-path programs/bpf/Cargo.toml \ diff --git a/clap-utils/Cargo.toml b/clap-utils/Cargo.toml index 7035e85f46..627e4a8ef1 100644 --- a/clap-utils/Cargo.toml +++ b/clap-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-clap-utils" -version = "1.5.19" +version = "1.6.14" description = "Solana utilities for the clap" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,14 +12,17 @@ edition = 
"2018" [dependencies] clap = "2.33.0" rpassword = "4.0" -solana-remote-wallet = { path = "../remote-wallet", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } thiserror = "1.0.21" -tiny-bip39 = "0.7.0" +tiny-bip39 = "0.8.0" uriparse = "0.6.3" url = "2.1.0" chrono = "0.4" +[dev-dependencies] +tempfile = "3.1.0" + [lib] name = "solana_clap_utils" diff --git a/clap-utils/src/fee_payer.rs b/clap-utils/src/fee_payer.rs index 4031ab4e88..ef21c31def 100644 --- a/clap-utils/src/fee_payer.rs +++ b/clap-utils/src/fee_payer.rs @@ -1,5 +1,7 @@ -use crate::{input_validators, ArgConstant}; -use clap::Arg; +use { + crate::{input_validators, ArgConstant}, + clap::Arg, +}; pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant { name: "fee_payer", diff --git a/clap-utils/src/input_parsers.rs b/clap-utils/src/input_parsers.rs index 50ba489cec..e64785142f 100644 --- a/clap-utils/src/input_parsers.rs +++ b/clap-utils/src/input_parsers.rs @@ -1,19 +1,21 @@ -use crate::keypair::{ - keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path, - ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG, +use { + crate::keypair::{ + keypair_from_seed_phrase, pubkey_from_path, resolve_signer_from_path, signer_from_path, + ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG, + }, + chrono::DateTime, + clap::ArgMatches, + solana_remote_wallet::remote_wallet::RemoteWalletManager, + solana_sdk::{ + clock::UnixTimestamp, + commitment_config::CommitmentConfig, + genesis_config::ClusterType, + native_token::sol_to_lamports, + pubkey::Pubkey, + signature::{read_keypair_file, Keypair, Signature, Signer}, + }, + std::{str::FromStr, sync::Arc}, }; -use chrono::DateTime; -use clap::ArgMatches; -use solana_remote_wallet::remote_wallet::RemoteWalletManager; -use solana_sdk::{ - clock::UnixTimestamp, - commitment_config::CommitmentConfig, 
- genesis_config::ClusterType, - native_token::sol_to_lamports, - pubkey::Pubkey, - signature::{read_keypair_file, Keypair, Signature, Signer}, -}; -use std::{str::FromStr, sync::Arc}; // Return parsed values from matches at `name` pub fn values_of(matches: &ArgMatches<'_>, name: &str) -> Option> @@ -55,7 +57,7 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option { if let Some(value) = matches.value_of(name) { if value == ASK_KEYWORD { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - keypair_from_seed_phrase(name, skip_validation, true).ok() + keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() } else { read_keypair_file(value).ok() } @@ -70,7 +72,7 @@ pub fn keypairs_of(matches: &ArgMatches<'_>, name: &str) -> Option> .filter_map(|value| { if value == ASK_KEYWORD { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - keypair_from_seed_phrase(name, skip_validation, true).ok() + keypair_from_seed_phrase(name, skip_validation, true, None, true).ok() } else { read_keypair_file(value).ok() } diff --git a/clap-utils/src/input_validators.rs b/clap-utils/src/input_validators.rs index 75682f1d79..b74990afb0 100644 --- a/clap-utils/src/input_validators.rs +++ b/clap-utils/src/input_validators.rs @@ -1,13 +1,15 @@ -use crate::keypair::{parse_signer_source, SignerSource, ASK_KEYWORD}; -use chrono::DateTime; -use solana_sdk::{ - clock::{Epoch, Slot}, - hash::Hash, - pubkey::{Pubkey, MAX_SEED_LEN}, - signature::{read_keypair_file, Signature}, +use { + crate::keypair::{parse_signer_source, SignerSourceKind, ASK_KEYWORD}, + chrono::DateTime, + solana_sdk::{ + clock::{Epoch, Slot}, + hash::Hash, + pubkey::{Pubkey, MAX_SEED_LEN}, + signature::{read_keypair_file, Signature}, + }, + std::fmt::Display, + std::str::FromStr, }; -use std::fmt::Display; -use std::str::FromStr; fn is_parsable_generic(string: T) -> Result<(), String> where @@ -94,6 +96,26 @@ where .map_err(|err| format!("{}", 
err)) } +// Return an error if a `SignerSourceKind::Prompt` cannot be parsed +pub fn is_prompt_signer_source(string: T) -> Result<(), String> +where + T: AsRef + Display, +{ + if string.as_ref() == ASK_KEYWORD { + return Ok(()); + } + match parse_signer_source(string.as_ref()) + .map_err(|err| format!("{}", err))? + .kind + { + SignerSourceKind::Prompt => Ok(()), + _ => Err(format!( + "Unable to parse input as `prompt:` URI scheme or `ASK` keyword: {}", + string + )), + } +} + // Return an error if string cannot be parsed as pubkey string or keypair file location pub fn is_pubkey_or_keypair(string: T) -> Result<(), String> where @@ -108,8 +130,11 @@ pub fn is_valid_pubkey(string: T) -> Result<(), String> where T: AsRef + Display, { - match parse_signer_source(string.as_ref()) { - SignerSource::Filepath(path) => is_keypair(path), + match parse_signer_source(string.as_ref()) + .map_err(|err| format!("{}", err))? + .kind + { + SignerSourceKind::Filepath(path) => is_keypair(path), _ => Ok(()), } } diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index d9d8f4dcbd..9d61776798 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -1,31 +1,39 @@ -use crate::{ - input_parsers::pubkeys_sigs_of, - offline::{SIGNER_ARG, SIGN_ONLY_ARG}, - ArgConstant, -}; -use bip39::{Language, Mnemonic, Seed}; -use clap::ArgMatches; -use rpassword::prompt_password_stderr; -use solana_remote_wallet::{ - remote_keypair::generate_remote_keypair, - remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager}, -}; -use solana_sdk::{ - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::{ - keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair, - read_keypair_file, Keypair, NullSigner, Presigner, Signature, Signer, +use { + crate::{ + input_parsers::pubkeys_sigs_of, + offline::{SIGNER_ARG, SIGN_ONLY_ARG}, + ArgConstant, }, -}; -use std::{ - convert::TryFrom, - error, - io::{stdin, stdout, Write}, - process::exit, - 
str::FromStr, - sync::Arc, + bip39::{Language, Mnemonic, Seed}, + clap::ArgMatches, + rpassword::prompt_password_stderr, + solana_remote_wallet::{ + locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError}, + remote_keypair::generate_remote_keypair, + remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager}, + }, + solana_sdk::{ + derivation_path::{DerivationPath, DerivationPathError}, + hash::Hash, + message::Message, + pubkey::Pubkey, + signature::{ + generate_seed_from_seed_phrase_and_passphrase, keypair_from_seed, + keypair_from_seed_and_derivation_path, keypair_from_seed_phrase_and_passphrase, + read_keypair, read_keypair_file, Keypair, NullSigner, Presigner, Signature, Signer, + }, + }, + std::{ + cell::RefCell, + convert::TryFrom, + error, + io::{stdin, stdout, Write}, + ops::Deref, + process::exit, + str::FromStr, + sync::Arc, + }, + thiserror::Error, }; pub struct SignOnly { @@ -84,12 +92,48 @@ impl CliSignerInfo { } } +#[derive(Debug, Default)] pub struct DefaultSigner { pub arg_name: String, pub path: String, + is_path_checked: RefCell, } impl DefaultSigner { + pub fn new, P: AsRef>(arg_name: AN, path: P) -> Self { + let arg_name = arg_name.as_ref().to_string(); + let path = path.as_ref().to_string(); + Self { + arg_name, + path, + ..Self::default() + } + } + + fn path(&self) -> Result<&str, Box> { + if !self.is_path_checked.borrow().deref() { + parse_signer_source(&self.path) + .and_then(|s| { + if let SignerSourceKind::Filepath(path) = &s.kind { + std::fs::metadata(path).map(|_| ()).map_err(|e| e.into()) + } else { + Ok(()) + } + }) + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "No default signer found, run \"solana-keygen new -o {}\" to create a new one", + self.path + ), + ) + })?; + *self.is_path_checked.borrow_mut() = true; + } + Ok(&self.path) + } + pub fn generate_unique_signers( &self, bulk_signers: Vec>>, @@ -120,7 +164,7 @@ impl DefaultSigner { matches: 
&ArgMatches, wallet_manager: &mut Option>, ) -> Result, Box> { - signer_from_path(matches, &self.path, &self.arg_name, wallet_manager) + signer_from_path(matches, self.path()?, &self.arg_name, wallet_manager) } pub fn signer_from_path_with_config( @@ -129,38 +173,147 @@ impl DefaultSigner { wallet_manager: &mut Option>, config: &SignerFromPathConfig, ) -> Result, Box> { - signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config) + signer_from_path_with_config( + matches, + self.path()?, + &self.arg_name, + wallet_manager, + config, + ) } } -pub(crate) enum SignerSource { - Ask, +pub(crate) struct SignerSource { + pub kind: SignerSourceKind, + pub derivation_path: Option, + pub legacy: bool, +} + +impl SignerSource { + fn new(kind: SignerSourceKind) -> Self { + Self { + kind, + derivation_path: None, + legacy: false, + } + } + + fn new_legacy(kind: SignerSourceKind) -> Self { + Self { + kind, + derivation_path: None, + legacy: true, + } + } + + // pub fn signer_from_path_with_config( + // &self, + // matches: &ArgMatches, + // wallet_manager: &mut Option>, + // config: &SignerFromPathConfig, + // ) -> Result, Box> { + // signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config) + // } +} + +const SIGNER_SOURCE_PROMPT: &str = "prompt"; +const SIGNER_SOURCE_FILEPATH: &str = "file"; +const SIGNER_SOURCE_USB: &str = "usb"; +const SIGNER_SOURCE_STDIN: &str = "stdin"; +const SIGNER_SOURCE_PUBKEY: &str = "pubkey"; + +pub(crate) enum SignerSourceKind { + Prompt, Filepath(String), - Usb(String), + Usb(RemoteWalletLocator), Stdin, Pubkey(Pubkey), } -pub(crate) fn parse_signer_source>(source: S) -> SignerSource { +impl AsRef for SignerSourceKind { + fn as_ref(&self) -> &str { + match self { + Self::Prompt => SIGNER_SOURCE_PROMPT, + Self::Filepath(_) => SIGNER_SOURCE_FILEPATH, + Self::Usb(_) => SIGNER_SOURCE_USB, + Self::Stdin => SIGNER_SOURCE_STDIN, + Self::Pubkey(_) => SIGNER_SOURCE_PUBKEY, + } + } +} + +impl 
std::fmt::Debug for SignerSourceKind { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s: &str = self.as_ref(); + write!(f, "{}", s) + } +} + +#[derive(Debug, Error)] +pub(crate) enum SignerSourceError { + #[error("unrecognized signer source")] + UnrecognizedSource, + #[error(transparent)] + RemoteWalletLocatorError(#[from] RemoteWalletLocatorError), + #[error(transparent)] + DerivationPathError(#[from] DerivationPathError), + #[error(transparent)] + IoError(#[from] std::io::Error), +} + +pub(crate) fn parse_signer_source>( + source: S, +) -> Result { let source = source.as_ref(); - match uriparse::URIReference::try_from(source) { - Err(_) => SignerSource::Filepath(source.to_string()), + let source = { + #[cfg(target_family = "windows")] + { + source.replace("\\", "/") + } + #[cfg(not(target_family = "windows"))] + { + source.to_string() + } + }; + match uriparse::URIReference::try_from(source.as_str()) { + Err(_) => Err(SignerSourceError::UnrecognizedSource), Ok(uri) => { if let Some(scheme) = uri.scheme() { let scheme = scheme.as_str().to_ascii_lowercase(); match scheme.as_str() { - "ask" => SignerSource::Ask, - "file" => SignerSource::Filepath(uri.path().to_string()), - "usb" => SignerSource::Usb(source.to_string()), - _ => SignerSource::Filepath(source.to_string()), + SIGNER_SOURCE_PROMPT => Ok(SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: DerivationPath::from_uri_any_query(&uri)?, + legacy: false, + }), + SIGNER_SOURCE_FILEPATH => Ok(SignerSource::new(SignerSourceKind::Filepath( + uri.path().to_string(), + ))), + SIGNER_SOURCE_USB => Ok(SignerSource { + kind: SignerSourceKind::Usb(RemoteWalletLocator::new_from_uri(&uri)?), + derivation_path: DerivationPath::from_uri_key_query(&uri)?, + legacy: false, + }), + SIGNER_SOURCE_STDIN => Ok(SignerSource::new(SignerSourceKind::Stdin)), + _ => { + #[cfg(target_family = "windows")] + // On Windows, an absolute path's drive letter will be parsed as the URI + // scheme. 
Assume a filepath source in case of a single character shceme. + if scheme.len() == 1 { + return Ok(SignerSource::new(SignerSourceKind::Filepath(source))); + } + Err(SignerSourceError::UnrecognizedSource) + } } } else { - match source { - "-" => SignerSource::Stdin, - ASK_KEYWORD => SignerSource::Ask, - _ => match Pubkey::from_str(source) { - Ok(pubkey) => SignerSource::Pubkey(pubkey), - Err(_) => SignerSource::Filepath(source.to_string()), + match source.as_str() { + "-" => Ok(SignerSource::new(SignerSourceKind::Stdin)), + ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)), + _ => match Pubkey::from_str(source.as_str()) { + Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))), + Err(_) => std::fs::metadata(source.as_str()) + .map(|_| SignerSource::new(SignerSourceKind::Filepath(source))) + .map_err(|err| err.into()), }, } } @@ -211,16 +364,23 @@ pub fn signer_from_path_with_config( wallet_manager: &mut Option>, config: &SignerFromPathConfig, ) -> Result, Box> { - match parse_signer_source(path) { - SignerSource::Ask => { + let SignerSource { + kind, + derivation_path, + legacy, + } = parse_signer_source(path)?; + match kind { + SignerSourceKind::Prompt => { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); Ok(Box::new(keypair_from_seed_phrase( keypair_name, skip_validation, false, + derivation_path, + legacy, )?)) } - SignerSource::Filepath(path) => match read_keypair_file(&path) { + SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { Err(e) => Err(std::io::Error::new( std::io::ErrorKind::Other, format!("could not read keypair file \"{}\". 
Run \"velas-keygen new\" to create a keypair file: {}", path, e), @@ -228,17 +388,18 @@ pub fn signer_from_path_with_config( .into()), Ok(file) => Ok(Box::new(file)), }, - SignerSource::Stdin => { + SignerSourceKind::Stdin => { let mut stdin = std::io::stdin(); Ok(Box::new(read_keypair(&mut stdin)?)) } - SignerSource::Usb(path) => { + SignerSourceKind::Usb(locator) => { if wallet_manager.is_none() { *wallet_manager = maybe_wallet_manager()?; } if let Some(wallet_manager) = wallet_manager { Ok(Box::new(generate_remote_keypair( - path, + locator, + derivation_path.unwrap_or_default(), wallet_manager, matches.is_present("confirm_key"), keypair_name, @@ -247,7 +408,7 @@ pub fn signer_from_path_with_config( Err(RemoteWalletError::NoDeviceFound.into()) } } - SignerSource::Pubkey(pubkey) => { + SignerSourceKind::Pubkey(pubkey) => { let presigner = pubkeys_sigs_of(matches, SIGNER_ARG.name) .as_ref() .and_then(|presigners| presigner_from_pubkey_sigs(&pubkey, presigners)); @@ -272,8 +433,9 @@ pub fn pubkey_from_path( keypair_name: &str, wallet_manager: &mut Option>, ) -> Result> { - match parse_signer_source(path) { - SignerSource::Pubkey(pubkey) => Ok(pubkey), + let SignerSource { kind, .. 
} = parse_signer_source(path)?; + match kind { + SignerSourceKind::Pubkey(pubkey) => Ok(pubkey), _ => Ok(signer_from_path(matches, path, keypair_name, wallet_manager)?.pubkey()), } } @@ -284,34 +446,51 @@ pub fn resolve_signer_from_path( keypair_name: &str, wallet_manager: &mut Option>, ) -> Result, Box> { - match parse_signer_source(path) { - SignerSource::Ask => { + let SignerSource { + kind, + derivation_path, + legacy, + } = parse_signer_source(path)?; + match kind { + SignerSourceKind::Prompt => { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); // This method validates the seed phrase, but returns `None` because there is no path // on disk or to a device - keypair_from_seed_phrase(keypair_name, skip_validation, false).map(|_| None) + keypair_from_seed_phrase( + keypair_name, + skip_validation, + false, + derivation_path, + legacy, + ) + .map(|_| None) } - SignerSource::Filepath(path) => match read_keypair_file(&path) { + SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { Err(e) => Err(std::io::Error::new( std::io::ErrorKind::Other, - format!("could not read keypair file \"{}\". Run \"velas-keygen new\" to create a keypair file: {}", path, e), + format!( + "could not read keypair file \"{}\". 
\ + Run \"solana-keygen new\" to create a keypair file: {}", + path, e + ), ) .into()), Ok(_) => Ok(Some(path.to_string())), }, - SignerSource::Stdin => { + SignerSourceKind::Stdin => { let mut stdin = std::io::stdin(); // This method validates the keypair from stdin, but returns `None` because there is no // path on disk or to a device read_keypair(&mut stdin).map(|_| None) } - SignerSource::Usb(path) => { + SignerSourceKind::Usb(locator) => { if wallet_manager.is_none() { *wallet_manager = maybe_wallet_manager()?; } if let Some(wallet_manager) = wallet_manager { let path = generate_remote_keypair( - path, + locator, + derivation_path.unwrap_or_default(), wallet_manager, matches.is_present("confirm_key"), keypair_name, @@ -326,7 +505,7 @@ pub fn resolve_signer_from_path( } } -// Keyword used to indicate that the user should be asked for a keypair seed phrase +// Keyword used to indicate that the user should be prompted for a keypair seed phrase pub const ASK_KEYWORD: &str = "ASK"; pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { @@ -347,6 +526,56 @@ pub fn prompt_passphrase(prompt: &str) -> Result> Ok(passphrase) } +/// Parses a path into a SignerSource and returns a Keypair for supporting SignerSourceKinds +pub fn keypair_from_path( + matches: &ArgMatches, + path: &str, + keypair_name: &str, + confirm_pubkey: bool, +) -> Result> { + let SignerSource { + kind, + derivation_path, + legacy, + } = parse_signer_source(path)?; + match kind { + SignerSourceKind::Prompt => { + let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + Ok(keypair_from_seed_phrase( + keypair_name, + skip_validation, + confirm_pubkey, + derivation_path, + legacy, + )?) + } + SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { + Err(e) => Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "could not read keypair file \"{}\". 
\ + Run \"solana-keygen new\" to create a keypair file: {}", + path, e + ), + ) + .into()), + Ok(file) => Ok(file), + }, + SignerSourceKind::Stdin => { + let mut stdin = std::io::stdin(); + Ok(read_keypair(&mut stdin)?) + } + _ => Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!( + "signer of type `{:?}` does not support Keypair output", + kind + ), + ) + .into()), + } +} + /// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation /// Optionally skips validation of seed phrase /// Optionally confirms recovered public key @@ -354,6 +583,8 @@ pub fn keypair_from_seed_phrase( keypair_name: &str, skip_validation: bool, confirm_pubkey: bool, + derivation_path: Option, + legacy: bool, ) -> Result> { let seed_phrase = prompt_password_stderr(&format!("[{}] seed phrase: ", keypair_name))?; let seed_phrase = seed_phrase.trim(); @@ -364,7 +595,12 @@ pub fn keypair_from_seed_phrase( let keypair = if skip_validation { let passphrase = prompt_passphrase(&passphrase_prompt)?; - keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? + if legacy { + keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? + } else { + let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase); + keypair_from_seed_and_derivation_path(&seed, derivation_path)? + } } else { let sanitized = sanitize_seed_phrase(seed_phrase); let parse_language_fn = || { @@ -387,7 +623,11 @@ pub fn keypair_from_seed_phrase( let mnemonic = parse_language_fn()?; let passphrase = prompt_passphrase(&passphrase_prompt)?; let seed = Seed::new(&mnemonic, &passphrase); - keypair_from_seed(seed.as_bytes())? + if legacy { + keypair_from_seed(seed.as_bytes())? + } else { + keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)? 
+ } }; if confirm_pubkey { @@ -415,7 +655,9 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String { #[cfg(test)] mod tests { use super::*; + use solana_remote_wallet::locator::Manufacturer; use solana_sdk::system_instruction; + use tempfile::NamedTempFile; #[test] fn test_sanitize_seed_phrase() { @@ -459,35 +701,119 @@ mod tests { #[test] fn test_parse_signer_source() { - assert!(matches!(parse_signer_source("-"), SignerSource::Stdin)); assert!(matches!( - parse_signer_source(ASK_KEYWORD), - SignerSource::Ask + parse_signer_source("-").unwrap(), + SignerSource { + kind: SignerSourceKind::Stdin, + derivation_path: None, + legacy: false, + } + )); + let stdin = "stdin:".to_string(); + assert!(matches!( + parse_signer_source(&stdin).unwrap(), + SignerSource { + kind: SignerSourceKind::Stdin, + derivation_path: None, + legacy: false, + } + )); + assert!(matches!( + parse_signer_source(ASK_KEYWORD).unwrap(), + SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: None, + legacy: true, + } )); let pubkey = Pubkey::new_unique(); assert!( - matches!(parse_signer_source(&pubkey.to_string()), SignerSource::Pubkey(p) if p == pubkey) + matches!(parse_signer_source(&pubkey.to_string()).unwrap(), SignerSource { + kind: SignerSourceKind::Pubkey(p), + derivation_path: None, + legacy: false, + } + if p == pubkey) + ); + + // Set up absolute and relative path strs + let file0 = NamedTempFile::new().unwrap(); + let path = file0.path(); + assert!(path.is_absolute()); + let absolute_path_str = path.to_str().unwrap(); + + let file1 = NamedTempFile::new_in(std::env::current_dir().unwrap()).unwrap(); + let path = file1.path().file_name().unwrap().to_str().unwrap(); + let path = std::path::Path::new(path); + assert!(path.is_relative()); + let relative_path_str = path.to_str().unwrap(); + + assert!( + matches!(parse_signer_source(absolute_path_str).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == 
absolute_path_str) + ); + assert!( + matches!(parse_signer_source(&relative_path_str).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == relative_path_str) ); - let path = "/absolute/path".to_string(); - assert!(matches!(parse_signer_source(&path), SignerSource::Filepath(p) if p == path)); - let path = "relative/path".to_string(); - assert!(matches!(parse_signer_source(&path), SignerSource::Filepath(p) if p == path)); + let usb = "usb://ledger".to_string(); - assert!(matches!(parse_signer_source(&usb), SignerSource::Usb(u) if u == usb)); - // Catchall into SignerSource::Filepath - let junk = "sometextthatisnotapubkey".to_string(); + let expected_locator = RemoteWalletLocator { + manufacturer: Manufacturer::Ledger, + pubkey: None, + }; + assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource { + kind: SignerSourceKind::Usb(u), + derivation_path: None, + legacy: false, + } if u == expected_locator)); + let usb = "usb://ledger?key=0/0".to_string(); + let expected_locator = RemoteWalletLocator { + manufacturer: Manufacturer::Ledger, + pubkey: None, + }; + let expected_derivation_path = Some(DerivationPath::new_bip44(Some(0), Some(0))); + assert!(matches!(parse_signer_source(&usb).unwrap(), SignerSource { + kind: SignerSourceKind::Usb(u), + derivation_path: d, + legacy: false, + } if u == expected_locator && d == expected_derivation_path)); + // Catchall into SignerSource::Filepath fails + let junk = "sometextthatisnotapubkeyorfile".to_string(); assert!(Pubkey::from_str(&junk).is_err()); - assert!(matches!(parse_signer_source(&junk), SignerSource::Filepath(j) if j == junk)); + assert!(matches!( + parse_signer_source(&junk), + Err(SignerSourceError::IoError(_)) + )); - let ask = "ask:".to_string(); - assert!(matches!(parse_signer_source(&ask), SignerSource::Ask)); - let path = "/absolute/path".to_string(); + let prompt = "prompt:".to_string(); + assert!(matches!( + 
parse_signer_source(&prompt).unwrap(), + SignerSource { + kind: SignerSourceKind::Prompt, + derivation_path: None, + legacy: false, + } + )); assert!( - matches!(parse_signer_source(&format!("file:{}", path)), SignerSource::Filepath(p) if p == path) + matches!(parse_signer_source(&format!("file:{}", absolute_path_str)).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == absolute_path_str) ); - let path = "relative/path".to_string(); assert!( - matches!(parse_signer_source(&format!("file:{}", path)), SignerSource::Filepath(p) if p == path) + matches!(parse_signer_source(&format!("file:{}", relative_path_str)).unwrap(), SignerSource { + kind: SignerSourceKind::Filepath(p), + derivation_path: None, + legacy: false, + } if p == relative_path_str) ); } } diff --git a/clap-utils/src/lib.rs b/clap-utils/src/lib.rs index 132cd8befa..c6f7e8e2d9 100644 --- a/clap-utils/src/lib.rs +++ b/clap-utils/src/lib.rs @@ -27,5 +27,6 @@ pub mod fee_payer; pub mod input_parsers; pub mod input_validators; pub mod keypair; +pub mod memo; pub mod nonce; pub mod offline; diff --git a/clap-utils/src/memo.rs b/clap-utils/src/memo.rs new file mode 100644 index 0000000000..4414e1ea9f --- /dev/null +++ b/clap-utils/src/memo.rs @@ -0,0 +1,15 @@ +use {crate::ArgConstant, clap::Arg}; + +pub const MEMO_ARG: ArgConstant<'static> = ArgConstant { + name: "memo", + long: "--with-memo", + help: "Specify a memo string to include in the transaction.", +}; + +pub fn memo_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(MEMO_ARG.name) + .long(MEMO_ARG.long) + .takes_value(true) + .value_name("MEMO") + .help(MEMO_ARG.help) +} diff --git a/clap-utils/src/nonce.rs b/clap-utils/src/nonce.rs index 2979135b26..34c7011075 100644 --- a/clap-utils/src/nonce.rs +++ b/clap-utils/src/nonce.rs @@ -1,5 +1,7 @@ -use crate::{input_validators::*, offline::BLOCKHASH_ARG, ArgConstant}; -use clap::{App, Arg}; +use { + crate::{input_validators::*, 
offline::BLOCKHASH_ARG, ArgConstant}, + clap::{App, Arg}, +}; pub const NONCE_ARG: ArgConstant<'static> = ArgConstant { name: "nonce", diff --git a/clap-utils/src/offline.rs b/clap-utils/src/offline.rs index f632e83e19..08f2364dfe 100644 --- a/clap-utils/src/offline.rs +++ b/clap-utils/src/offline.rs @@ -1,5 +1,7 @@ -use crate::{input_validators::*, ArgConstant}; -use clap::{App, Arg}; +use { + crate::{input_validators::*, ArgConstant}, + clap::{App, Arg}, +}; pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant { name: "blockhash", diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index 5593e305f2..ac22214ff1 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-cli-config" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-cli-config" [dependencies] dirs-next = "2.0.0" lazy_static = "1.4.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" serde_yaml = "0.8.13" url = "2.1.1" diff --git a/cli-config/src/config.rs b/cli-config/src/config.rs index 2fec957fde..ad72bff453 100644 --- a/cli-config/src/config.rs +++ b/cli-config/src/config.rs @@ -75,7 +75,8 @@ impl Config { .set_scheme(if is_secure { "wss" } else { "ws" }) .expect("unable to set scheme"); if let Some(port) = json_rpc_url.port() { - ws_url.set_port(Some(port + 1)).expect("unable to set port"); + let port = port.checked_add(1).expect("port out of range"); + ws_url.set_port(Some(port)).expect("unable to set port"); } ws_url.to_string() } @@ -106,13 +107,13 @@ mod test { #[test] fn compute_websocket_url() { assert_eq!( - Config::compute_websocket_url(&"http://devnet.solana.com"), - "ws://devnet.solana.com/".to_string() + Config::compute_websocket_url(&"http://api.devnet.solana.com"), + 
"ws://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://devnet.solana.com"), - "wss://devnet.solana.com/".to_string() + Config::compute_websocket_url(&"https://api.devnet.solana.com"), + "wss://api.devnet.solana.com/".to_string() ); assert_eq!( diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 29a485d47e..f75074d313 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-cli-output" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -16,16 +16,17 @@ console = "0.11.3" humantime = "2.0.1" Inflector = "0.11.4" indicatif = "0.15.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } 
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index cf0888389a..74c71971d7 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -15,8 +15,8 @@ use { solana_account_decoder::parse_token::UiTokenAccount, solana_clap_utils::keypair::SignOnly, solana_client::rpc_response::{ - RpcAccountBalance, RpcInflationGovernor, RpcInflationRate, RpcKeyedAccount, RpcSupply, - RpcVoteAccountInfo, + RpcAccountBalance, RpcContactInfo, RpcInflationGovernor, RpcInflationRate, RpcKeyedAccount, + RpcSupply, RpcVoteAccountInfo, }, solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, @@ -35,7 +35,7 @@ use { }, solana_vote_program::{ authorized_voters::AuthorizedVoters, - vote_state::{BlockTimestamp, Lockout}, + vote_state::{BlockTimestamp, Lockout, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY}, }, std::{ collections::{BTreeMap, HashMap}, @@ -303,6 +303,19 @@ pub struct CliValidatorsStakeByVersion { pub delinquent_active_stake: u64, } +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Copy)] +pub enum CliValidatorsSortOrder { + Delinquent, + Commission, + EpochCredits, + Identity, + LastVote, + Root, + SkipRate, + Stake, + VoteAccount, +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CliValidators { @@ -310,8 +323,13 @@ pub struct CliValidators { pub total_active_stake: u64, pub total_current_stake: u64, pub total_delinquent_stake: u64, - pub current_validators: Vec, - pub delinquent_validators: Vec, + pub validators: Vec, + #[serde(skip_serializing)] + pub validators_sort_order: CliValidatorsSortOrder, + #[serde(skip_serializing)] + pub validators_reverse_sort: bool, + #[serde(skip_serializing)] + pub number_validators: bool, pub stake_by_version: BTreeMap, #[serde(skip_serializing)] pub use_lamports_unit: bool, @@ -327,30 +345,40 @@ impl fmt::Display for CliValidators { validator: &CliValidator, total_active_stake: u64, 
use_lamports_unit: bool, - delinquent: bool, + highest_last_vote: u64, + highest_root: u64, ) -> fmt::Result { - fn non_zero_or_dash(v: u64) -> String { + fn non_zero_or_dash(v: u64, max_v: u64) -> String { if v == 0 { - "-".into() + "- ".into() + } else if v == max_v { + format!("{:>8} ( 0)", v) + } else if v > max_v.saturating_sub(100) { + format!("{:>8} ({:>3})", v, -(max_v.saturating_sub(v) as isize)) } else { - format!("{}", v) + format!("{:>8} ", v) } } writeln!( f, - "{} {:<44} {:<44} {:>3}% {:>8} {:>10} {:>10} {:>8} {}", - if delinquent { + "{} {:<44} {:<44} {:>3}% {:>14} {:>14} {:>7} {:>8} {:>7} {}", + if validator.delinquent { WARNING.to_string() } else { - " ".to_string() + "\u{a0}".to_string() }, validator.identity_pubkey, validator.vote_account_pubkey, validator.commission, - non_zero_or_dash(validator.last_vote), - non_zero_or_dash(validator.root_slot), - validator.credits, + non_zero_or_dash(validator.last_vote, highest_last_vote), + non_zero_or_dash(validator.root_slot, highest_root), + if let Some(skip_rate) = validator.skip_rate { + format!("{:.2}%", skip_rate) + } else { + "- ".to_string() + }, + validator.epoch_credits, validator.version, if validator.activated_stake > 0 { format!( @@ -364,47 +392,101 @@ impl fmt::Display for CliValidators { ) } - writeln_name_value( - f, - "Validators majority count:", - &self.majority_count.to_string(), - )?; + let padding = if self.number_validators { + ((self.validators.len() + 1) as f64).log10().floor() as usize + 1 + } else { + 0 + }; + let header = style(format!( + "{:padding$} {:<44} {:<38} {} {} {} {} {} {} {}", + " ", + "Identity", + "Vote Account", + "Commission", + "Last Vote ", + "Root Slot ", + "Skip Rate", + "Credits", + "Version", + "Active Stake", + padding = padding + 1 + )) + .bold(); + writeln!(f, "{}", header)?; + + let mut sorted_validators = self.validators.clone(); + match self.validators_sort_order { + CliValidatorsSortOrder::Delinquent => { + sorted_validators.sort_by_key(|a| 
a.delinquent); + } + CliValidatorsSortOrder::Commission => { + sorted_validators.sort_by_key(|a| a.commission); + } + CliValidatorsSortOrder::EpochCredits => { + sorted_validators.sort_by_key(|a| a.epoch_credits); + } + CliValidatorsSortOrder::Identity => { + sorted_validators.sort_by(|a, b| a.identity_pubkey.cmp(&b.identity_pubkey)); + } + CliValidatorsSortOrder::LastVote => { + sorted_validators.sort_by_key(|a| a.last_vote); + } + CliValidatorsSortOrder::Root => { + sorted_validators.sort_by_key(|a| a.root_slot); + } + CliValidatorsSortOrder::VoteAccount => { + sorted_validators.sort_by(|a, b| a.vote_account_pubkey.cmp(&b.vote_account_pubkey)); + } + CliValidatorsSortOrder::SkipRate => { + sorted_validators.sort_by(|a, b| { + use std::cmp::Ordering; + match (a.skip_rate, b.skip_rate) { + (None, None) => Ordering::Equal, + (None, Some(_)) => Ordering::Greater, + (Some(_), None) => Ordering::Less, + (Some(a), Some(b)) => a.partial_cmp(&b).unwrap_or(Ordering::Equal), + } + }); + } + CliValidatorsSortOrder::Stake => { + sorted_validators.sort_by_key(|a| a.activated_stake); + } + } - writeln!( - f, - "{}", - style(format!( - " {:<44} {:<38} {} {} {} {:>10} {:^8} {}", - "Identity", - "Vote Account", - "Commission", - "Last Vote", - "Root Block", - "Credits", - "Version", - "Active Stake", - )) - .bold() - )?; - for validator in &self.current_validators { - write_vote_account( - f, - validator, - self.total_active_stake, - self.use_lamports_unit, - false, - )?; + if self.validators_reverse_sort { + sorted_validators.reverse(); } - for validator in &self.delinquent_validators { + + let highest_root = sorted_validators + .iter() + .map(|v| v.root_slot) + .max() + .unwrap_or_default(); + let highest_last_vote = sorted_validators + .iter() + .map(|v| v.last_vote) + .max() + .unwrap_or_default(); + + for (i, validator) in sorted_validators.iter().enumerate() { + if padding > 0 { + write!(f, "{:padding$}", i + 1, padding = padding)?; + } write_vote_account( f, validator, 
self.total_active_stake, self.use_lamports_unit, - true, + highest_last_vote, + highest_root, )?; } + // The actual header has long scrolled away. Print the header once more as a footer + if self.validators.len() > 100 { + writeln!(f, "{}", header)?; + } + writeln!(f)?; writeln_name_value( f, @@ -461,7 +543,7 @@ impl fmt::Display for CliValidators { } } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct CliValidator { pub identity_pubkey: String, @@ -469,9 +551,12 @@ pub struct CliValidator { pub commission: u8, pub last_vote: u64, pub root_slot: u64, - pub credits: u64, + pub credits: u64, // lifetime credits + pub epoch_credits: u64, // credits earned in the current epoch pub activated_stake: u64, pub version: String, + pub delinquent: bool, + pub skip_rate: Option, } impl CliValidator { @@ -479,27 +564,67 @@ impl CliValidator { vote_account: &RpcVoteAccountInfo, current_epoch: Epoch, version: String, + skip_rate: Option, address_labels: &HashMap, ) -> Self { + Self::_new( + vote_account, + current_epoch, + version, + skip_rate, + address_labels, + false, + ) + } + + pub fn new_delinquent( + vote_account: &RpcVoteAccountInfo, + current_epoch: Epoch, + version: String, + skip_rate: Option, + address_labels: &HashMap, + ) -> Self { + Self::_new( + vote_account, + current_epoch, + version, + skip_rate, + address_labels, + true, + ) + } + + fn _new( + vote_account: &RpcVoteAccountInfo, + current_epoch: Epoch, + version: String, + skip_rate: Option, + address_labels: &HashMap, + delinquent: bool, + ) -> Self { + let (credits, epoch_credits) = vote_account + .epoch_credits + .iter() + .find_map(|(epoch, credits, pre_credits)| { + if *epoch == current_epoch { + Some((*credits, credits.saturating_sub(*pre_credits))) + } else { + None + } + }) + .unwrap_or((0, 0)); Self { identity_pubkey: format_labeled_address(&vote_account.node_pubkey, address_labels), vote_account_pubkey: 
format_labeled_address(&vote_account.vote_pubkey, address_labels), commission: vote_account.commission, last_vote: vote_account.last_vote, root_slot: vote_account.root_slot, - credits: vote_account - .epoch_credits - .iter() - .find_map(|(epoch, credits, _)| { - if *epoch == current_epoch { - Some(*credits) - } else { - None - } - }) - .unwrap_or(0), + credits, + epoch_credits, activated_stake: vote_account.activated_stake, version, + delinquent, + skip_rate, } } } @@ -690,17 +815,55 @@ fn show_votes_and_credits( return Ok(()); } - writeln!(f, "Recent Votes:")?; - for vote in votes { - writeln!(f, "- slot: {}", vote.slot)?; - writeln!(f, " confirmation count: {}", vote.confirmation_count)?; - } - writeln!(f, "Epoch Voting History:")?; + // Existence of this should guarantee the occurrence of vote truncation + let newest_history_entry = epoch_voting_history.iter().rev().next(); + writeln!( f, - "* missed credits include slots unavailable to vote on due to delinquent leaders", + "{} Votes (using {}/{} entries):", + (if newest_history_entry.is_none() { + "All" + } else { + "Recent" + }), + votes.len(), + MAX_LOCKOUT_HISTORY )?; - for entry in epoch_voting_history { + + for vote in votes.iter().rev() { + writeln!( + f, + "- slot: {} (confirmation count: {})", + vote.slot, vote.confirmation_count + )?; + } + if let Some(newest) = newest_history_entry { + writeln!( + f, + "- ... 
(truncated {} rooted votes, which have been credited)", + newest.credits + )?; + } + + if !epoch_voting_history.is_empty() { + writeln!( + f, + "{} Epoch Voting History (using {}/{} entries):", + (if epoch_voting_history.len() < MAX_EPOCH_CREDITS_HISTORY { + "All" + } else { + "Recent" + }), + epoch_voting_history.len(), + MAX_EPOCH_CREDITS_HISTORY + )?; + writeln!( + f, + "* missed credits include slots unavailable to vote on due to delinquent leaders", + )?; + } + + for entry in epoch_voting_history.iter().rev() { writeln!( f, // tame fmt so that this will be folded like following "- epoch: {}", @@ -708,7 +871,7 @@ fn show_votes_and_credits( )?; writeln!( f, - " credits range: [{}..{})", + " credits range: ({}..{}]", entry.prev_credits, entry.credits )?; writeln!( @@ -717,6 +880,22 @@ fn show_votes_and_credits( entry.credits_earned, entry.slots_in_epoch )?; } + if let Some(oldest) = epoch_voting_history.iter().next() { + if oldest.prev_credits > 0 { + // Oldest entry doesn't start with 0. so history must be truncated... + + // count of this combined pseudo credits range: (0..=oldest.prev_credits] like the above + // (or this is just [1..=oldest.prev_credits] for human's simpler minds) + let count = oldest.prev_credits; + + writeln!( + f, + "- ... (omitting {} past rooted votes, which have already been credited)", + count + )?; + } + } + Ok(()) } @@ -1283,18 +1462,18 @@ impl fmt::Display for CliInflation { if (self.governor.initial - self.governor.terminal).abs() < f64::EPSILON { writeln!( f, - "Fixed APR: {:>5.2}%", + "Fixed rate: {:>5.2}%", self.governor.terminal * 100. )?; } else { writeln!( f, - "Initial APR: {:>5.2}%", + "Initial rate: {:>5.2}%", self.governor.initial * 100. )?; writeln!( f, - "Terminal APR: {:>5.2}%", + "Terminal rate: {:>5.2}%", self.governor.terminal * 100. )?; writeln!( @@ -1302,6 +1481,10 @@ impl fmt::Display for CliInflation { "Rate reduction per year: {:>5.2}%", self.governor.taper * 100. 
)?; + writeln!( + f, + "* Rate reduction is derived using the target slot time in genesis config" + )?; } if self.governor.foundation_term > 0. { writeln!( @@ -1323,17 +1506,17 @@ impl fmt::Display for CliInflation { )?; writeln!( f, - "Total APR: {:>5.2}%", + "Total rate: {:>5.2}%", self.current_rate.total * 100. )?; writeln!( f, - "Staking APR: {:>5.2}%", + "Staking rate: {:>5.2}%", self.current_rate.validator * 100. )?; writeln!( f, - "Foundation APR: {:>5.2}%", + "Foundation rate: {:>5.2}%", self.current_rate.foundation * 100. ) } @@ -1944,6 +2127,9 @@ impl fmt::Display for CliBlock { if let Some(block_time) = self.encoded_confirmed_block.block_time { writeln!(f, "Block Time: {:?}", Local.timestamp(block_time, 0))?; } + if let Some(block_height) = self.encoded_confirmed_block.block_height { + writeln!(f, "Block Height: {:?}", block_height)?; + } if !self.encoded_confirmed_block.rewards.is_empty() { let mut rewards = self.encoded_confirmed_block.rewards.clone(); rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey)); @@ -2101,6 +2287,97 @@ impl fmt::Display for CliTransactionConfirmation { } } +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliGossipNode { + #[serde(skip_serializing_if = "Option::is_none")] + pub ip_address: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub identity_label: Option, + pub identity_pubkey: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub gossip_port: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tpu_port: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub rpc_host: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub version: Option, +} + +impl CliGossipNode { + pub fn new(info: RpcContactInfo, labels: &HashMap) -> Self { + Self { + ip_address: info.gossip.map(|addr| addr.ip().to_string()), + identity_label: labels.get(&info.pubkey).cloned(), + identity_pubkey: info.pubkey, + gossip_port: info.gossip.map(|addr| addr.port()), 
+ tpu_port: info.tpu.map(|addr| addr.port()), + rpc_host: info.rpc.map(|addr| addr.to_string()), + version: info.version, + } + } +} + +fn unwrap_to_string_or_none(option: Option) -> String +where + T: std::string::ToString, +{ + unwrap_to_string_or_default(option, "none") +} + +fn unwrap_to_string_or_default(option: Option, default: &str) -> String +where + T: std::string::ToString, +{ + option + .as_ref() + .map(|v| v.to_string()) + .unwrap_or_else(|| default.to_string()) +} + +impl fmt::Display for CliGossipNode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{:15} | {:44} | {:6} | {:5} | {:21} | {}", + unwrap_to_string_or_none(self.ip_address.as_ref()), + self.identity_label + .as_ref() + .unwrap_or(&self.identity_pubkey), + unwrap_to_string_or_none(self.gossip_port.as_ref()), + unwrap_to_string_or_none(self.tpu_port.as_ref()), + unwrap_to_string_or_none(self.rpc_host.as_ref()), + unwrap_to_string_or_default(self.version.as_ref(), "unknown"), + ) + } +} + +impl QuietDisplay for CliGossipNode {} +impl VerboseDisplay for CliGossipNode {} + +#[derive(Serialize, Deserialize)] +pub struct CliGossipNodes(pub Vec); + +impl fmt::Display for CliGossipNodes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!( + f, + "IP Address | Node identifier \ + | Gossip | TPU | RPC Address | Version\n\ + ----------------+----------------------------------------------+\ + --------+-------+-----------------------+----------------", + )?; + for node in self.0.iter() { + writeln!(f, "{}", node)?; + } + writeln!(f, "Nodes: {}", self.0.len()) + } +} + +impl QuietDisplay for CliGossipNodes {} +impl VerboseDisplay for CliGossipNodes {} + #[cfg(test)] mod tests { use super::*; diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index 78c87b446c..c1c1d09e72 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -4,10 +4,12 @@ use { console::style, indicatif::{ProgressBar, ProgressStyle}, solana_sdk::{ - 
clock::UnixTimestamp, hash::Hash, native_token::lamports_to_sol, - program_utils::limited_deserialize, transaction::Transaction, + clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol, + program_utils::limited_deserialize, pubkey::Pubkey, transaction::Transaction, }, solana_transaction_status::UiTransactionStatusMeta, + spl_memo::id as spl_memo_id, + spl_memo::v1::id as spl_memo_v1_id, std::{collections::HashMap, fmt, io}, }; @@ -28,6 +30,11 @@ impl Default for BuildBalanceMessageConfig { } } +fn is_memo_program(k: &Pubkey) -> bool { + let k_str = k.to_string(); + (k_str == spl_memo_v1_id().to_string()) || (k_str == spl_memo_id().to_string()) +} + pub fn build_balance_message_with_config( lamports: u64, config: &BuildBalanceMessageConfig, @@ -125,6 +132,31 @@ pub fn println_signers( println!(); } +fn format_account_mode(message: &Message, index: usize) -> String { + format!( + "{}r{}{}", // accounts are always readable... + if message.is_signer(index) { + "s" // stands for signer + } else { + "-" + }, + if message.is_writable(index, /*demote_sysvar_write_locks=*/ true) { + "w" // comment for consistent rust fmt (no joking; lol) + } else { + "-" + }, + // account may be executable on-chain while not being + // designated as a program-id in the message + if message.maybe_executable(index) { + "x" + } else { + // programs to be executed via CPI cannot be identified as + // executable from the message + "-" + }, + ) +} + pub fn write_transaction( w: &mut W, transaction: &Transaction, @@ -167,16 +199,31 @@ pub fn write_transaction( prefix, signature_index, signature, sigverify_status, )?; } - writeln!(w, "{}{:?}", prefix, message.header)?; + let mut fee_payer_index = None; for (account_index, account) in message.account_keys.iter().enumerate() { - writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?; + if fee_payer_index.is_none() && message.is_non_loader_key(account, account_index) { + fee_payer_index = Some(account_index) + } 
+ writeln!( + w, + "{}Account {}: {} {}{}", + prefix, + account_index, + format_account_mode(message, account_index), + account, + if Some(account_index) == fee_payer_index { + " (fee payer)" + } else { + "" + }, + )?; } for (instruction_index, instruction) in message.instructions.iter().enumerate() { let program_pubkey = message.account_keys[instruction.program_id_index as usize]; writeln!(w, "{}Instruction {}", prefix, instruction_index)?; writeln!( w, - "{} Program: {} ({})", + "{} Program: {} ({})", prefix, program_pubkey, instruction.program_id_index )?; for (account_index, account) in instruction.accounts.iter().enumerate() { @@ -213,6 +260,11 @@ pub fn write_transaction( writeln!(w, "{} {:?}", prefix, system_instruction)?; raw = false; } + } else if is_memo_program(&program_pubkey) { + if let Ok(s) = std::str::from_utf8(&instruction.data) { + writeln!(w, "{} Data: \"{}\"", prefix, s)?; + raw = false; + } } if raw { @@ -270,7 +322,38 @@ pub fn write_transaction( if !log_messages.is_empty() { writeln!(w, "{}Log Messages:", prefix,)?; for log_message in log_messages { - writeln!(w, "{} {}", prefix, log_message,)?; + writeln!(w, "{} {}", prefix, log_message)?; + } + } + } + + if let Some(rewards) = &transaction_status.rewards { + if !rewards.is_empty() { + writeln!(w, "{}Rewards:", prefix,)?; + writeln!( + w, + "{} {:<44} {:^15} {:<15} {:<20}", + prefix, "Address", "Type", "Amount", "New Balance" + )?; + for reward in rewards { + let sign = if reward.lamports < 0 { "-" } else { "" }; + writeln!( + w, + "{} {:<44} {:^15} {:<15} {}", + prefix, + reward.pubkey, + if let Some(reward_type) = reward.reward_type { + format!("{}", reward_type) + } else { + "-".to_string() + }, + format!( + "{}◎{:<14.9}", + sign, + lamports_to_sol(reward.lamports.abs() as u64) + ), + format!("◎{:<18.9}", lamports_to_sol(reward.post_balance),) + )?; } } } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index e190c5024c..2f742d7895 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -3,7 +3,7 
@@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-cli" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -24,33 +24,34 @@ indicatif = "0.15.0" humantime = "2.0.1" num-traits = "0.2" pretty-hex = "0.2.1" -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } -serde = "1.0.118" +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-cli-output = { path = "../cli-output", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-config-program = { path = "../programs/config", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana_rbpf = "=0.2.7" -solana-remote-wallet = { path = "../remote-wallet", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.6.14" } +solana-clap-utils = { path = 
"../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-cli-output = { path = "../cli-output", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-config-program = { path = "../programs/config", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana_rbpf = "=0.2.9" +solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.21" tiny-bip39 = "0.7.0" url = "2.1.1" [dev-dependencies] -solana-core = { path = "../core", version = "=1.5.19" } +solana-core = { path = "../core", version = "=1.6.14" } tempfile = "3.1.0" [[bin]] diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 26a51dc286..4c929f6d64 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,6 +1,6 @@ use crate::{ - cluster_query::*, feature::*, inflation::*, nonce::*, program::*, spend_utils::*, stake::*, - validator_info::*, vote::*, + cluster_query::*, feature::*, inflation::*, memo::*, nonce::*, program::*, spend_utils::*, + stake::*, validator_info::*, vote::*, }; use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; @@ -13,13 +13,15 @@ use solana_clap_utils::{ input_parsers::*, input_validators::*, keypair::*, + memo::{memo_arg, MEMO_ARG}, nonce::*, offline::*, }; use solana_cli_output::{ display::{build_balance_message, println_name_value}, return_signers_with_config, 
CliAccount, CliSignature, CliSignatureVerificationStatus, - CliTransaction, CliTransactionConfirmation, OutputFormat, ReturnSignersConfig, + CliTransaction, CliTransactionConfirmation, CliValidatorsSortOrder, OutputFormat, + ReturnSignersConfig, }; use solana_client::{ blockhash_query::BlockhashQuery, @@ -128,6 +130,9 @@ pub enum CliCommand { }, ShowValidators { use_lamports_unit: bool, + sort_order: CliValidatorsSortOrder, + reverse_sort: bool, + number_validators: bool, }, Supply { print_accounts: bool, @@ -147,18 +152,21 @@ pub enum CliCommand { AuthorizeNonceAccount { nonce_account: Pubkey, nonce_authority: SignerIndex, + memo: Option, new_authority: Pubkey, }, CreateNonceAccount { nonce_account: SignerIndex, seed: Option, nonce_authority: Option, + memo: Option, amount: SpendAmount, }, GetNonce(Pubkey), NewNonce { nonce_account: Pubkey, nonce_authority: SignerIndex, + memo: Option, }, ShowNonceAccount { nonce_account_pubkey: Pubkey, @@ -167,6 +175,7 @@ pub enum CliCommand { WithdrawFromNonceAccount { nonce_account: Pubkey, nonce_authority: SignerIndex, + memo: Option, destination_account_pubkey: Pubkey, lamports: u64, }, @@ -191,6 +200,7 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, from: SignerIndex, }, @@ -202,6 +212,8 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, + seed: Option, fee_payer: SignerIndex, }, DelegateStake { @@ -214,6 +226,7 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, }, SplitStake { @@ -224,6 +237,7 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, split_stake_account: SignerIndex, seed: Option, lamports: u64, @@ -238,6 +252,7 @@ pub enum CliCommand { blockhash_query: 
BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, }, ShowStakeHistory { @@ -257,6 +272,7 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, custodian: Option, }, @@ -269,12 +285,13 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, }, WithdrawStake { stake_account_pubkey: Pubkey, destination_account_pubkey: Pubkey, - lamports: u64, + amount: SpendAmount, withdraw_authority: SignerIndex, custodian: Option, sign_only: bool, @@ -282,6 +299,8 @@ pub enum CliCommand { blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, + seed: Option, fee_payer: SignerIndex, }, // Validator Info Commands @@ -299,6 +318,7 @@ pub enum CliCommand { authorized_voter: Option, authorized_withdrawer: Option, commission: u8, + memo: Option, }, ShowVoteAccount { pubkey: Pubkey, @@ -310,21 +330,25 @@ pub enum CliCommand { destination_account_pubkey: Pubkey, withdraw_authority: SignerIndex, withdraw_amount: SpendAmount, + memo: Option, }, VoteAuthorize { vote_account_pubkey: Pubkey, new_authorized_pubkey: Pubkey, vote_authorize: VoteAuthorize, + memo: Option, }, VoteUpdateValidator { vote_account_pubkey: Pubkey, new_identity_account: SignerIndex, withdraw_authority: SignerIndex, + memo: Option, }, VoteUpdateCommission { vote_account_pubkey: Pubkey, commission: u8, withdraw_authority: SignerIndex, + memo: Option, }, // Wallet Commands Address, @@ -350,10 +374,12 @@ pub enum CliCommand { from: SignerIndex, sign_only: bool, dump_transaction_message: bool, + allow_unfunded_recipient: bool, no_wait: bool, blockhash_query: BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option, fee_payer: SignerIndex, derived_address_seed: Option, derived_address_program_id: Option, @@ 
-419,7 +445,7 @@ pub struct CliConfig<'a> { pub websocket_url: String, pub signers: Vec<&'a dyn Signer>, pub keypair_path: String, - pub rpc_client: Option, + pub rpc_client: Option>, pub rpc_timeout: Duration, pub verbose: bool, pub output_format: OutputFormat, @@ -685,7 +711,7 @@ pub fn parse_command( } // Stake Commands ("create-stake-account", Some(matches)) => { - parse_stake_create_account(matches, default_signer, wallet_manager) + parse_create_stake_account(matches, default_signer, wallet_manager) } ("delegate-stake", Some(matches)) => { parse_stake_delegate_stake(matches, default_signer, wallet_manager) @@ -825,7 +851,7 @@ pub fn parse_command( signers: vec![], }) } - ("pay", Some(matches)) | ("transfer", Some(matches)) => { + ("transfer", Some(matches)) => { let amount = SpendAmount::new_from_matches(matches, "amount"); let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap(); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); @@ -835,9 +861,11 @@ pub fn parse_command( let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; let (from, from_pubkey) = signer_of(matches, "from", wallet_manager)?; + let allow_unfunded_recipient = matches.is_present("allow_unfunded_recipient"); let mut bulk_signers = vec![fee_payer, from]; if nonce_account.is_some() { @@ -859,10 +887,12 @@ pub fn parse_command( to, sign_only, dump_transaction_message, + allow_unfunded_recipient, no_wait, blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), from: signer_info.index_of(from_pubkey).unwrap(), derived_address_seed, @@ -1093,7 +1123,7 @@ fn process_show_account( 
pubkey: account_pubkey.to_string(), account: UiAccount::encode( account_pubkey, - account, + &account, UiAccountEncoding::Base64, None, None, @@ -1130,10 +1160,12 @@ fn process_transfer( from: SignerIndex, sign_only: bool, dump_transaction_message: bool, + allow_unfunded_recipient: bool, no_wait: bool, blockhash_query: &BlockhashQuery, nonce_account: Option<&Pubkey>, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, derived_address_seed: Option, derived_address_program_id: Option<&Pubkey>, @@ -1144,6 +1176,21 @@ fn process_transfer( let (recent_blockhash, fee_calculator) = blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; + if !sign_only && !allow_unfunded_recipient { + let recipient_balance = rpc_client + .get_balance_with_commitment(to, config.commitment)? + .value; + if recipient_balance == 0 { + return Err(format!( + "The recipient address ({}) is not funded. \ + Add `--allow-unfunded-recipient` to complete the transfer \ + ", + to + ) + .into()); + } + } + let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -1166,8 +1213,9 @@ fn process_transfer( to, lamports, )] + .with_memo(memo) } else { - vec![system_instruction::transfer(&from_pubkey, to, lamports)] + vec![system_instruction::transfer(&from_pubkey, to, lamports)].with_memo(memo) }; if let Some(nonce_account) = &nonce_account { @@ -1237,17 +1285,15 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { println_name_value("Commitment:", &config.commitment.commitment.to_string()); } - let mut _rpc_client; let rpc_client = if config.rpc_client.is_none() { - _rpc_client = RpcClient::new_with_timeout_and_commitment( + Arc::new(RpcClient::new_with_timeout_and_commitment( config.json_rpc_url.to_string(), config.rpc_timeout, config.commitment, - ); - &_rpc_client + )) } else { // Primarily for testing - config.rpc_client.as_ref().unwrap() + config.rpc_client.as_ref().unwrap().clone() }; match 
&config.command { @@ -1338,9 +1384,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::WaitForMaxStake { max_stake_percent } => { process_wait_for_max_stake(&rpc_client, config, *max_stake_percent) } - CliCommand::ShowValidators { use_lamports_unit } => { - process_show_validators(&rpc_client, config, *use_lamports_unit) - } + CliCommand::ShowValidators { + use_lamports_unit, + sort_order, + reverse_sort, + number_validators, + } => process_show_validators( + &rpc_client, + config, + *use_lamports_unit, + *sort_order, + *reverse_sort, + *number_validators, + ), CliCommand::Supply { print_accounts } => { process_supply(&rpc_client, config, *print_accounts) } @@ -1367,12 +1423,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::AuthorizeNonceAccount { nonce_account, nonce_authority, + memo, new_authority, } => process_authorize_nonce_account( &rpc_client, config, nonce_account, *nonce_authority, + memo.as_ref(), new_authority, ), // Create nonce account @@ -1380,6 +1438,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { nonce_account, seed, nonce_authority, + memo, amount, } => process_create_nonce_account( &rpc_client, @@ -1387,6 +1446,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *nonce_account, seed.clone(), *nonce_authority, + memo.as_ref(), *amount, ), // Get the current nonce @@ -1397,7 +1457,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::NewNonce { nonce_account, nonce_authority, - } => process_new_nonce(&rpc_client, config, nonce_account, *nonce_authority), + memo, + } => process_new_nonce( + &rpc_client, + config, + nonce_account, + *nonce_authority, + memo.as_ref(), + ), // Show the contents of a nonce account CliCommand::ShowNonceAccount { nonce_account_pubkey, @@ -1412,6 +1479,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::WithdrawFromNonceAccount { nonce_account, nonce_authority, + memo, 
destination_account_pubkey, lamports, } => process_withdraw_from_nonce_account( @@ -1419,6 +1487,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { config, &nonce_account, *nonce_authority, + memo.as_ref(), &destination_account_pubkey, *lamports, ), @@ -1432,7 +1501,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { use_deprecated_loader, allow_excessive_balance, } => process_deploy( - &rpc_client, + rpc_client, config, program_location, *address, @@ -1440,7 +1509,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *allow_excessive_balance, ), CliCommand::Program(program_subcommand) => { - process_program_subcommand(&rpc_client, config, program_subcommand) + process_program_subcommand(rpc_client, config, program_subcommand) } // Stake Commands @@ -1458,6 +1527,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, ref nonce_account, nonce_authority, + memo, fee_payer, from, } => process_create_stake_account( @@ -1474,6 +1544,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account.as_ref(), *nonce_authority, + memo.as_ref(), *fee_payer, *from, ), @@ -1485,6 +1556,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, + seed, fee_payer, } => process_deactivate_stake_account( &rpc_client, @@ -1496,6 +1569,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), + seed.as_ref(), *fee_payer, ), CliCommand::DelegateStake { @@ -1508,6 +1583,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, fee_payer, } => process_delegate_stake( &rpc_client, @@ -1521,6 +1597,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), *fee_payer, ), CliCommand::SplitStake { @@ 
-1531,6 +1608,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, split_stake_account, seed, lamports, @@ -1545,6 +1623,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), *split_stake_account, seed, *lamports, @@ -1559,6 +1638,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, fee_payer, } => process_merge_stake( &rpc_client, @@ -1571,6 +1651,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), *fee_payer, ), CliCommand::ShowStakeAccount { @@ -1596,6 +1677,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, fee_payer, custodian, } => process_stake_authorize( @@ -1609,6 +1691,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), *fee_payer, ), CliCommand::StakeSetLockup { @@ -1620,6 +1703,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account, nonce_authority, + memo, fee_payer, } => process_stake_set_lockup( &rpc_client, @@ -1632,12 +1716,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, *nonce_account, *nonce_authority, + memo.as_ref(), *fee_payer, ), CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey, - lamports, + amount, withdraw_authority, custodian, sign_only, @@ -1645,13 +1730,15 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, ref nonce_account, nonce_authority, + memo, + seed, fee_payer, } => process_withdraw_stake( &rpc_client, config, &stake_account_pubkey, &destination_account_pubkey, - *lamports, + *amount, *withdraw_authority, *custodian, *sign_only, @@ -1659,6 
+1746,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { blockhash_query, nonce_account.as_ref(), *nonce_authority, + memo.as_ref(), + seed.as_ref(), *fee_payer, ), @@ -1691,6 +1780,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { authorized_voter, authorized_withdrawer, commission, + memo, } => process_create_vote_account( &rpc_client, config, @@ -1700,6 +1790,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { authorized_voter, authorized_withdrawer, *commission, + memo.as_ref(), ), CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, @@ -1717,6 +1808,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { withdraw_authority, withdraw_amount, destination_account_pubkey, + memo, } => process_withdraw_from_vote_account( &rpc_client, config, @@ -1724,39 +1816,46 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *withdraw_authority, *withdraw_amount, destination_account_pubkey, + memo.as_ref(), ), CliCommand::VoteAuthorize { vote_account_pubkey, new_authorized_pubkey, vote_authorize, + memo, } => process_vote_authorize( &rpc_client, config, &vote_account_pubkey, &new_authorized_pubkey, *vote_authorize, + memo.as_ref(), ), CliCommand::VoteUpdateValidator { vote_account_pubkey, new_identity_account, withdraw_authority, + memo, } => process_vote_update_validator( &rpc_client, config, &vote_account_pubkey, *new_identity_account, *withdraw_authority, + memo.as_ref(), ), CliCommand::VoteUpdateCommission { vote_account_pubkey, commission, withdraw_authority, + memo, } => process_vote_update_commission( &rpc_client, config, &vote_account_pubkey, *commission, *withdraw_authority, + memo.as_ref(), ), // Wallet Commands @@ -1799,10 +1898,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { from, sign_only, dump_transaction_message, + allow_unfunded_recipient, no_wait, ref blockhash_query, ref nonce_account, nonce_authority, + memo, fee_payer, derived_address_seed, ref 
derived_address_program_id, @@ -1814,10 +1915,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *from, *sign_only, *dump_transaction_message, + *allow_unfunded_recipient, *no_wait, blockhash_query, nonce_account.as_ref(), *nonce_authority, + memo.as_ref(), *fee_payer, derived_address_seed.clone(), derived_address_program_id.as_ref(), @@ -1952,6 +2055,17 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .takes_value(true) .required(true) .help("The transaction signature to confirm"), + ) + .after_help(// Formatted specifically for the manually-indented heredoc string + "Note: This will show more detailed information for finalized transactions with verbose mode (-v/--verbose).\ + \n\ + \nAccount modes:\ + \n |srwx|\ + \n s: signed\ + \n r: readable (always true)\ + \n w: writable\ + \n x: program account (inner instructions excluded)\ + " ), ) .subcommand( @@ -2040,28 +2154,6 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .help("Use the designated program id, even if the account already holds a large balance of VLX") ), ) - .subcommand( - SubCommand::with_name("pay") - .about("Deprecated alias for the transfer command") - .arg( - pubkey!(Arg::with_name("to") - .index(1) - .value_name("RECIPIENT_ADDRESS") - .required(true), - "The account address of recipient. "), - ) - .arg( - Arg::with_name("amount") - .index(2) - .value_name("AMOUNT") - .takes_value(true) - .validator(is_amount_or_all) - .required(true) - .help("The amount to send, in VLX; accepts keyword ALL"), - ) - .offline_args() - .nonce_args(false) - ) .subcommand( SubCommand::with_name("resolve-signer") .about("Checks that a signer is valid, and returns its specific path; useful for signers that may be specified generally, eg. 
usb://ledger") @@ -2078,6 +2170,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' .subcommand( SubCommand::with_name("transfer") .about("Transfer funds between system accounts") + .alias("pay") .arg( pubkey!(Arg::with_name("to") .index(1) @@ -2127,10 +2220,11 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, ' Arg::with_name("allow_unfunded_recipient") .long("allow-unfunded-recipient") .takes_value(false) - .hidden(true) // Forward compatibility with v1.6 + .help("Complete the transfer even if the recipient address is not funded") ) .offline_args() .nonce_args(false) + .arg(memo_arg()) .arg(fee_payer_arg()), ) .subcommand( @@ -2178,7 +2272,6 @@ mod tests { signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Keypair, Presigner}, transaction::TransactionError, }; - use solana_stake_program::stake_state::MIN_DELEGATE_STAKE_AMOUNT; use solana_transaction_status::TransactionConfirmationStatus; use std::path::PathBuf; @@ -2204,10 +2297,7 @@ mod tests { let default_keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &default_keypair_file).unwrap(); - let default_signer = DefaultSigner { - arg_name: "keypair".to_string(), - path: default_keypair_file, - }; + let default_signer = DefaultSigner::new("keypair", &default_keypair_file); let signer_info = default_signer .generate_unique_signers(vec![], &matches, &mut None) @@ -2285,10 +2375,7 @@ mod tests { let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let keypair = read_keypair_file(&keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); // Test Airdrop Subcommand let test_airdrop = test_commands @@ -2418,12 +2505,12 @@ mod tests { ); // Test Deploy Subcommand - let test_deploy = + let test_command = test_commands 
.clone() .get_matches_from(vec!["test", "deploy", "/Users/test/program.o"]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Deploy { program_location: "/Users/test/program.o".to_string(), @@ -2438,14 +2525,14 @@ mod tests { let custom_address = Keypair::new(); let custom_address_file = make_tmp_path("custom_address_file"); write_keypair_file(&custom_address, &custom_address_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "deploy", "/Users/test/program.o", &custom_address_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Deploy { program_location: "/Users/test/program.o".to_string(), @@ -2491,7 +2578,7 @@ mod tests { fn test_cli_process_command() { // Success cases let mut config = CliConfig { - rpc_client: Some(RpcClient::new_mock("succeeds".to_string())), + rpc_client: Some(Arc::new(RpcClient::new_mock("succeeds".to_string()))), json_rpc_url: "http://127.0.0.1:8899".to_string(), ..CliConfig::default() }; @@ -2531,6 +2618,7 @@ mod tests { authorized_voter: Some(bob_pubkey), authorized_withdrawer: Some(bob_pubkey), commission: 0, + memo: None, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; let result = process_command(&config); @@ -2542,6 +2630,7 @@ mod tests { vote_account_pubkey: bob_pubkey, new_authorized_pubkey, vote_authorize: VoteAuthorize::Voter, + memo: None, }; let result = process_command(&config); assert!(result.is_ok()); @@ -2552,6 +2641,7 @@ mod tests { vote_account_pubkey: bob_pubkey, new_identity_account: 2, withdraw_authority: 1, + memo: None, }; let result = process_command(&config); assert!(result.is_ok()); @@ -2575,6 +2665,7 @@ mod tests { 
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -2586,7 +2677,7 @@ mod tests { config.command = CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey: to_pubkey, - lamports: 100, + amount: SpendAmount::All, withdraw_authority: 0, custodian: None, sign_only: false, @@ -2594,6 +2685,8 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; config.signers = vec![&keypair]; @@ -2608,6 +2701,8 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -2622,6 +2717,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, split_stake_account: 1, seed: None, lamports: MIN_DELEGATE_STAKE_AMOUNT + 400, @@ -2642,10 +2738,12 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; config.signers = vec![&keypair, &merge_stake_account]; - process_command(&config).unwrap(); + let result = process_command(&config); + assert!(result.is_ok()); config.command = CliCommand::GetSlot; assert_eq!(process_command(&config).unwrap(), "0"); @@ -2676,13 +2774,13 @@ mod tests { assert!(process_command(&config).is_ok()); // sig_not_found case - config.rpc_client = Some(RpcClient::new_mock("sig_not_found".to_string())); + config.rpc_client = Some(Arc::new(RpcClient::new_mock("sig_not_found".to_string()))); let missing_signature = Signature::new(&bs58::decode("5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW").into_vec().unwrap()); config.command = CliCommand::Confirm(missing_signature); assert_eq!(process_command(&config).unwrap(), "Not found"); // Tx error 
case - config.rpc_client = Some(RpcClient::new_mock("account_in_use".to_string())); + config.rpc_client = Some(Arc::new(RpcClient::new_mock("account_in_use".to_string()))); let any_signature = Signature::new(&bs58::decode(SIGNATURE).into_vec().unwrap()); config.command = CliCommand::Confirm(any_signature); assert_eq!( @@ -2691,7 +2789,7 @@ mod tests { ); // Failure cases - config.rpc_client = Some(RpcClient::new_mock("fails".to_string())); + config.rpc_client = Some(Arc::new(RpcClient::new_mock("fails".to_string()))); config.command = CliCommand::Airdrop { pubkey: None, @@ -2714,6 +2812,7 @@ mod tests { authorized_voter: Some(bob_pubkey), authorized_withdrawer: Some(bob_pubkey), commission: 0, + memo: None, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; assert!(process_command(&config).is_err()); @@ -2722,6 +2821,7 @@ mod tests { vote_account_pubkey: bob_pubkey, new_authorized_pubkey: bob_pubkey, vote_authorize: VoteAuthorize::Voter, + memo: None, }; assert!(process_command(&config).is_err()); @@ -2729,6 +2829,7 @@ mod tests { vote_account_pubkey: bob_pubkey, new_identity_account: 1, withdraw_authority: 1, + memo: None, }; assert!(process_command(&config).is_err()); @@ -2758,7 +2859,7 @@ mod tests { mocks.insert(RpcRequest::GetAccountInfo, account_info_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); - config.rpc_client = Some(rpc_client); + config.rpc_client = Some(Arc::new(rpc_client)); let default_keypair = Keypair::new(); config.signers = vec![&default_keypair]; @@ -2768,6 +2869,7 @@ mod tests { use_deprecated_loader: false, allow_excessive_balance: false, }; + config.output_format = OutputFormat::JsonCompact; let result = process_command(&config); let json: Value = serde_json::from_str(&result.unwrap()).unwrap(); let program_id = json @@ -2797,10 +2899,7 @@ mod tests { let default_keypair = Keypair::new(); let default_keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, 
&default_keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: default_keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &default_keypair_file); //Test Transfer Subcommand, SOL let from_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); @@ -2821,10 +2920,12 @@ mod tests { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -2846,10 +2947,12 @@ mod tests { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -2858,11 +2961,12 @@ mod tests { } ); - // Test Transfer no-wait + // Test Transfer no-wait and --allow-unfunded-recipient let test_transfer = test_commands.clone().get_matches_from(vec![ "test", "transfer", "--no-wait", + "--allow-unfunded-recipient", &to_string, "42", ]); @@ -2875,10 +2979,12 @@ mod tests { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: true, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -2908,10 +3014,12 @@ mod tests { from: 0, sign_only: true, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -2946,6 +3054,7 @@ mod tests { from: 0, 
sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::Cluster, @@ -2953,6 +3062,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -2988,6 +3098,7 @@ mod tests { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_address), @@ -2995,6 +3106,7 @@ mod tests { ), nonce_account: Some(nonce_address), nonce_authority: 1, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -3028,10 +3140,12 @@ mod tests { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: false, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: Some(derived_address_seed), derived_address_program_id: Some(solana_stake_program::id()), diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index d60160ff27..d51da94852 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -46,9 +46,10 @@ use solana_sdk::{ rent::Rent, rpc_port::DEFAULT_RPC_PORT_STR, signature::Signature, - system_instruction, system_program, + slot_history, system_instruction, system_program, sysvar::{ self, + slot_history::SlotHistory, stake_history::{self}, }, timing, @@ -60,7 +61,6 @@ use solana_vote_program::vote_state::VoteState; use std::{ collections::{BTreeMap, HashMap, VecDeque}, fmt, - net::SocketAddr, str::FromStr, sync::{ atomic::{AtomicBool, Ordering}, @@ -349,6 +349,38 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("lamports") .takes_value(false) .help("Display balance in lamports instead of VLX"), + ) + .arg( + 
Arg::with_name("number") + .long("number") + .short("n") + .takes_value(false) + .help("Number the validators"), + ) + .arg( + Arg::with_name("reverse") + .long("reverse") + .short("r") + .takes_value(false) + .help("Reverse order while sorting"), + ) + .arg( + Arg::with_name("sort") + .long("sort") + .takes_value(true) + .possible_values(&[ + "delinquent", + "commission", + "credits", + "identity", + "last-vote", + "root", + "skip-rate", + "stake", + "vote-account", + ]) + .default_value("stake") + .help("Sort order (does not affect JSON output)"), ), ) .subcommand( @@ -582,9 +614,29 @@ pub fn parse_show_stakes( pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result { let use_lamports_unit = matches.is_present("lamports"); + let number_validators = matches.is_present("number"); + let reverse_sort = matches.is_present("reverse"); + + let sort_order = match value_t_or_exit!(matches, "sort", String).as_str() { + "delinquent" => CliValidatorsSortOrder::Delinquent, + "commission" => CliValidatorsSortOrder::Commission, + "credits" => CliValidatorsSortOrder::EpochCredits, + "identity" => CliValidatorsSortOrder::Identity, + "last-vote" => CliValidatorsSortOrder::LastVote, + "root" => CliValidatorsSortOrder::Root, + "skip-rate" => CliValidatorsSortOrder::SkipRate, + "stake" => CliValidatorsSortOrder::Stake, + "vote-account" => CliValidatorsSortOrder::VoteAccount, + _ => unreachable!(), + }; Ok(CliCommandInfo { - command: CliCommand::ShowValidators { use_lamports_unit }, + command: CliCommand::ShowValidators { + use_lamports_unit, + sort_order, + reverse_sort, + number_validators, + }, signers: vec![], }) } @@ -1041,8 +1093,8 @@ pub fn process_get_slot(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessR } pub fn process_get_block_height(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessResult { - let epoch_info = rpc_client.get_epoch_info()?; - Ok(epoch_info.block_height.to_string()) + let block_height = rpc_client.get_block_height()?; + 
Ok(block_height.to_string()) } pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result { @@ -1069,8 +1121,6 @@ pub fn process_show_block_production( return Err(format!("Epoch {} is in the future", epoch).into()); } - let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); let end_slot = std::cmp::min( epoch_info.absolute_slot, @@ -1083,32 +1133,60 @@ pub fn process_show_block_production( first_slot_in_epoch }; - if minimum_ledger_slot > end_slot { - return Err(format!( - "Ledger data not available for slots {} to {} (minimum ledger slot is {})", - start_slot, end_slot, minimum_ledger_slot - ) - .into()); - } - - if minimum_ledger_slot > start_slot { - println!( - "\n{}", - style(format!( - "Note: Requested start slot was {} but minimum ledger slot is {}", - start_slot, minimum_ledger_slot - )) - .italic(), - ); - start_slot = minimum_ledger_slot; - } - let progress_bar = new_spinner_progress_bar(); progress_bar.set_message(&format!( "Fetching confirmed blocks between slots {} and {}...", start_slot, end_slot )); - let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?; + + let slot_history_account = rpc_client + .get_account_with_commitment(&sysvar::slot_history::id(), CommitmentConfig::finalized())? + .value + .unwrap(); + + let slot_history: SlotHistory = from_account(&slot_history_account).ok_or_else(|| { + CliError::RpcRequestError("Failed to deserialize slot history".to_string()) + })?; + + let (confirmed_blocks, start_slot) = + if start_slot >= slot_history.oldest() && end_slot <= slot_history.newest() { + // Fast, more reliable path using the SlotHistory sysvar + + let confirmed_blocks: Vec<_> = (start_slot..=end_slot) + .filter(|slot| slot_history.check(*slot) == slot_history::Check::Found) + .collect(); + (confirmed_blocks, start_slot) + } else { + // Slow, less reliable path using `getBlocks`. 
+ // + // "less reliable" because if the RPC node has holds in its ledger then the block production data will be + // incorrect. This condition currently can't be detected over RPC + // + + let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; + if minimum_ledger_slot > end_slot { + return Err(format!( + "Ledger data not available for slots {} to {} (minimum ledger slot is {})", + start_slot, end_slot, minimum_ledger_slot + ) + .into()); + } + + if minimum_ledger_slot > start_slot { + progress_bar.println(format!( + "{}", + style(format!( + "Note: Requested start slot was {} but minimum ledger slot is {}", + start_slot, minimum_ledger_slot + )) + .italic(), + )); + start_slot = minimum_ledger_slot; + } + + let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?; + (confirmed_blocks, start_slot) + }; let start_slot_index = (start_slot - first_slot_in_epoch) as usize; let end_slot_index = (end_slot - first_slot_in_epoch) as usize; @@ -1371,9 +1449,7 @@ pub fn process_ping( // Sleep for half a slot if signal_receiver - .recv_timeout(Duration::from_millis( - 500 * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND, - )) + .recv_timeout(Duration::from_millis(clock::DEFAULT_MS_PER_SLOT / 2)) .is_ok() { break 'mainloop; @@ -1578,40 +1654,14 @@ pub fn process_live_slots(config: &CliConfig) -> ProcessResult { pub fn process_show_gossip(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult { let cluster_nodes = rpc_client.get_cluster_nodes()?; - fn format_port(addr: Option) -> String { - addr.map(|addr| addr.port().to_string()) - .unwrap_or_else(|| "none".to_string()) - } - - let s: Vec<_> = cluster_nodes + let nodes: Vec<_> = cluster_nodes .into_iter() - .map(|node| { - format!( - "{:15} | {:44} | {:6} | {:5} | {:21} | {}", - node.gossip - .map(|addr| addr.ip().to_string()) - .unwrap_or_else(|| "none".to_string()), - format_labeled_address(&node.pubkey, &config.address_labels), - format_port(node.gossip), - 
format_port(node.tpu), - node.rpc - .map(|addr| addr.to_string()) - .unwrap_or_else(|| "none".to_string()), - node.version.unwrap_or_else(|| "unknown".to_string()), - ) - }) + .map(|node| CliGossipNode::new(node, &config.address_labels)) .collect(); - Ok(format!( - "IP Address | Node identifier \ - | Gossip | TPU | RPC Address | Version\n\ - ----------------+----------------------------------------------+\ - --------+-------+-----------------------+----------------\n\ - {}\n\ - Nodes: {}", - s.join("\n"), - s.len(), - )) + Ok(config + .output_format + .formatted_string(&CliGossipNodes(nodes))) } pub fn process_show_stakes( @@ -1626,11 +1676,11 @@ pub fn process_show_stakes( progress_bar.set_message("Fetching stake accounts..."); let mut program_accounts_config = RpcProgramAccountsConfig { - filters: None, account_config: RpcAccountInfoConfig { encoding: Some(solana_account_decoder::UiAccountEncoding::Base64), ..RpcAccountInfoConfig::default() }, + ..RpcProgramAccountsConfig::default() }; if let Some(vote_account_pubkeys) = vote_account_pubkeys { @@ -1732,10 +1782,36 @@ pub fn process_show_validators( rpc_client: &RpcClient, config: &CliConfig, use_lamports_unit: bool, + validators_sort_order: CliValidatorsSortOrder, + validators_reverse_sort: bool, + number_validators: bool, ) -> ProcessResult { + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message("Fetching vote accounts..."); let epoch_info = rpc_client.get_epoch_info()?; let vote_accounts = rpc_client.get_vote_accounts()?; + progress_bar.set_message("Fetching block production..."); + let skip_rate: HashMap<_, _> = rpc_client + .get_block_production() + .ok() + .map(|result| { + result + .value + .by_identity + .into_iter() + .map(|(identity, (leader_slots, blocks_produced))| { + ( + identity, + 100. 
* (leader_slots.saturating_sub(blocks_produced)) as f64 + / leader_slots as f64, + ) + }) + .collect() + }) + .unwrap_or_default(); + + progress_bar.set_message("Fetching version information..."); let mut node_version = HashMap::new(); let unknown_version = "unknown".to_string(); for contact_info in rpc_client.get_cluster_nodes()? { @@ -1754,6 +1830,8 @@ pub fn process_show_validators( }) .count() as u64; + progress_bar.finish_and_clear(); + let total_active_stake = vote_accounts .current .iter() @@ -1768,9 +1846,8 @@ pub fn process_show_validators( .sum(); let total_current_stake = total_active_stake - total_delinquent_stake; - let mut current = vote_accounts.current; - current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake)); - let current_validators: Vec = current + let current_validators: Vec = vote_accounts + .current .iter() .map(|vote_account| { CliValidator::new( @@ -1780,22 +1857,23 @@ pub fn process_show_validators( .get(&vote_account.node_pubkey) .unwrap_or(&unknown_version) .clone(), + skip_rate.get(&vote_account.node_pubkey).cloned(), &config.address_labels, ) }) .collect(); - let mut delinquent = vote_accounts.delinquent; - delinquent.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake)); - let delinquent_validators: Vec = delinquent + let delinquent_validators: Vec = vote_accounts + .delinquent .iter() .map(|vote_account| { - CliValidator::new( + CliValidator::new_delinquent( vote_account, epoch_info.epoch, node_version .get(&vote_account.node_pubkey) .unwrap_or(&unknown_version) .clone(), + skip_rate.get(&vote_account.node_pubkey).cloned(), &config.address_labels, ) }) @@ -1822,8 +1900,13 @@ pub fn process_show_validators( total_active_stake, total_current_stake, total_delinquent_stake, - current_validators, - delinquent_validators, + validators: current_validators + .into_iter() + .chain(delinquent_validators.into_iter()) + .collect(), + validators_sort_order, + validators_reverse_sort, + number_validators, stake_by_version, 
use_lamports_unit, }; @@ -2023,10 +2106,7 @@ mod tests { let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner { - path: default_keypair_file, - arg_name: String::new(), - }; + let default_signer = DefaultSigner::new("", &default_keypair_file); let test_cluster_version = test_commands .clone() diff --git a/cli/src/lib.rs b/cli/src/lib.rs index ed480e334d..55d0b947f0 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -26,9 +26,9 @@ pub mod cli; pub mod cluster_query; pub mod feature; pub mod inflation; +pub mod memo; pub mod nonce; pub mod program; -pub mod send_tpu; pub mod spend_utils; pub mod stake; pub mod test_utils; diff --git a/cli/src/main.rs b/cli/src/main.rs index 358cf134e4..a791bc6f09 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -178,10 +178,7 @@ pub fn parse_args<'a>( &config.keypair_path, ); - let default_signer = DefaultSigner { - arg_name: default_signer_arg_name, - path: default_signer_path.clone(), - }; + let default_signer = DefaultSigner::new(default_signer_arg_name, &default_signer_path); let CliCommandInfo { command, diff --git a/cli/src/memo.rs b/cli/src/memo.rs new file mode 100644 index 0000000000..ccb02c3d9b --- /dev/null +++ b/cli/src/memo.rs @@ -0,0 +1,22 @@ +use solana_sdk::instruction::Instruction; +use solana_sdk::pubkey::Pubkey; +use spl_memo::id; + +pub trait WithMemo { + fn with_memo>(self, memo: Option) -> Self; +} + +impl WithMemo for Vec { + fn with_memo>(mut self, memo: Option) -> Self { + if let Some(memo) = &memo { + let memo = memo.as_ref(); + let memo_ix = Instruction { + program_id: Pubkey::new(&id().to_bytes()), + accounts: vec![], + data: memo.as_bytes().to_vec(), + }; + self.push(memo_ix); + } + self + } +} diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index bb9ab35b62..203d1abbc9 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -4,6 +4,7 @@ use crate::{ 
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, + memo::WithMemo, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }; use clap::{App, Arg, ArgMatches, SubCommand}; @@ -11,6 +12,7 @@ use solana_clap_utils::{ input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, + memo::MEMO_ARG, nonce::*, }; use solana_cli_output::CliNonceAccount; @@ -171,6 +173,7 @@ pub fn parse_authorize_nonce_account( ) -> Result { let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap(); let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap(); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (nonce_authority, nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; @@ -185,6 +188,7 @@ pub fn parse_authorize_nonce_account( command: CliCommand::AuthorizeNonceAccount { nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, new_authority, }, signers: signer_info.signers, @@ -201,6 +205,7 @@ pub fn parse_nonce_create_account( let seed = matches.value_of("seed").map(|s| s.to_string()); let amount = SpendAmount::new_from_matches(matches, "amount"); let nonce_authority = pubkey_of_signer(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let payer_provided = None; let signer_info = default_signer.generate_unique_signers( @@ -214,6 +219,7 @@ pub fn parse_nonce_create_account( nonce_account: signer_info.index_of(nonce_account_pubkey).unwrap(), seed, nonce_authority, + memo, amount, }, signers: signer_info.signers, @@ -239,6 +245,7 @@ pub fn parse_new_nonce( wallet_manager: &mut Option>, ) -> Result { let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap(); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (nonce_authority, 
nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; @@ -253,6 +260,7 @@ pub fn parse_new_nonce( command: CliCommand::NewNonce { nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, }, signers: signer_info.signers, }) @@ -284,6 +292,7 @@ pub fn parse_withdraw_from_nonce_account( let destination_account_pubkey = pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap(); let lamports = lamports_of_sol(matches, "amount").unwrap(); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (nonce_authority, nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; @@ -298,6 +307,7 @@ pub fn parse_withdraw_from_nonce_account( command: CliCommand::WithdrawFromNonceAccount { nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, destination_account_pubkey, lamports, }, @@ -330,13 +340,19 @@ pub fn process_authorize_nonce_account( config: &CliConfig, nonce_account: &Pubkey, nonce_authority: SignerIndex, + memo: Option<&String>, new_authority: &Pubkey, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let nonce_authority = config.signers[nonce_authority]; - let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority); - let message = Message::new(&[ix], Some(&config.signers[0].pubkey())); + let ixs = vec![authorize_nonce_account( + nonce_account, + &nonce_authority.pubkey(), + new_authority, + )] + .with_memo(memo); + let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; @@ -357,6 +373,7 @@ pub fn process_create_nonce_account( nonce_account: SignerIndex, seed: Option, nonce_authority: Option, + memo: Option<&String>, amount: SpendAmount, ) -> ProcessResult { let nonce_account_pubkey = 
config.signers[nonce_account].pubkey(); @@ -383,6 +400,7 @@ pub fn process_create_nonce_account( &nonce_authority, lamports, ) + .with_memo(memo) } else { create_nonce_account( &config.signers[0].pubkey(), @@ -390,6 +408,7 @@ pub fn process_create_nonce_account( &nonce_authority, lamports, ) + .with_memo(memo) }; Message::new(&ixs, Some(&config.signers[0].pubkey())) }; @@ -451,6 +470,7 @@ pub fn process_new_nonce( config: &CliConfig, nonce_account: &Pubkey, nonce_authority: SignerIndex, + memo: Option<&String>, ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), @@ -466,9 +486,13 @@ pub fn process_new_nonce( } let nonce_authority = config.signers[nonce_authority]; - let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey()); + let ixs = vec![advance_nonce_account( + &nonce_account, + &nonce_authority.pubkey(), + )] + .with_memo(memo); let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; - let message = Message::new(&[ix], Some(&config.signers[0].pubkey())); + let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; check_account_for_fee_with_commitment( @@ -517,19 +541,21 @@ pub fn process_withdraw_from_nonce_account( config: &CliConfig, nonce_account: &Pubkey, nonce_authority: SignerIndex, + memo: Option<&String>, destination_account_pubkey: &Pubkey, lamports: u64, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let nonce_authority = config.signers[nonce_authority]; - let ix = withdraw_nonce_account( + let ixs = vec![withdraw_nonce_account( nonce_account, &nonce_authority.pubkey(), destination_account_pubkey, lamports, - ); - let message = Message::new(&[ix], Some(&config.signers[0].pubkey())); + )] + .with_memo(memo); + let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = 
Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; check_account_for_fee_with_commitment( @@ -570,10 +596,7 @@ mod tests { let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner { - path: default_keypair_file.clone(), - arg_name: String::new(), - }; + let default_signer = DefaultSigner::new("", &default_keypair_file); let (keypair_file, mut tmp_file) = make_tmp_file(); let nonce_account_keypair = Keypair::new(); write_keypair(&nonce_account_keypair, tmp_file.as_file_mut()).unwrap(); @@ -597,6 +620,7 @@ mod tests { command: CliCommand::AuthorizeNonceAccount { nonce_account: nonce_account_pubkey, nonce_authority: 0, + memo: None, new_authority: Pubkey::default(), }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -618,6 +642,7 @@ mod tests { command: CliCommand::AuthorizeNonceAccount { nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(), nonce_authority: 1, + memo: None, new_authority: Pubkey::default(), }, signers: vec![ @@ -641,6 +666,7 @@ mod tests { nonce_account: 1, seed: None, nonce_authority: None, + memo: None, amount: SpendAmount::Some(50_000_000_000), }, signers: vec![ @@ -666,6 +692,7 @@ mod tests { nonce_account: 1, seed: None, nonce_authority: Some(nonce_authority_keypair.pubkey()), + memo: None, amount: SpendAmount::Some(50_000_000_000), }, signers: vec![ @@ -701,6 +728,7 @@ mod tests { command: CliCommand::NewNonce { nonce_account: nonce_account.pubkey(), nonce_authority: 0, + memo: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -721,6 +749,7 @@ mod tests { command: CliCommand::NewNonce { nonce_account: nonce_account.pubkey(), nonce_authority: 1, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -765,6 +794,7 @@ mod tests { command: 
CliCommand::WithdrawFromNonceAccount { nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(), nonce_authority: 0, + memo: None, destination_account_pubkey: nonce_account_pubkey, lamports: 42_000_000_000 }, @@ -793,6 +823,7 @@ mod tests { command: CliCommand::WithdrawFromNonceAccount { nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(), nonce_authority: 1, + memo: None, destination_account_pubkey: nonce_account_pubkey, lamports: 42_000_000_000 }, diff --git a/cli/src/program.rs b/cli/src/program.rs index 99cc49f66c..fe933a16f2 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -1,4 +1,3 @@ -use crate::send_tpu::{get_leader_tpus, send_transaction_tpu}; use crate::{ checks::*, cli::{ @@ -6,16 +5,15 @@ use crate::{ ProcessResult, }, }; -use bincode::serialize; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; -use serde_json::{self, json, Value}; use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}; use solana_bpf_loader_program::{bpf_verifier, BpfError, ThisInstructionMeter}; use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}; use solana_cli_output::{ - display::new_spinner_progress_bar, CliProgram, CliUpgradeableBuffer, CliUpgradeableBuffers, + display::new_spinner_progress_bar, CliProgram, CliProgramAccountType, CliProgramAuthority, + CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableBuffers, CliUpgradeableProgram, }; use solana_client::{ @@ -25,7 +23,7 @@ use solana_client::{ rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, - rpc_response::RpcLeaderSchedule, + tpu_client::{TpuClient, TpuClientConfig}, }; use solana_rbpf::vm::{Config, Executable}; use solana_remote_wallet::remote_wallet::RemoteWalletManager; @@ -51,20 +49,17 @@ use solana_sdk::{ }; use 
solana_transaction_status::TransactionConfirmationStatus; use std::{ - cmp::min, collections::HashMap, error, fs::File, io::{Read, Write}, - net::UdpSocket, path::PathBuf, sync::Arc, thread::sleep, - time::{Duration, Instant}, + time::Duration, }; const DATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE -const NUM_TPU_LEADERS: u64 = 2; #[derive(Debug, PartialEq)] pub enum ProgramCliCommand { @@ -622,7 +617,7 @@ pub fn parse_program_subcommand( } pub fn process_program_subcommand( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, program_subcommand: &ProgramCliCommand, ) -> ProcessResult { @@ -638,7 +633,7 @@ pub fn process_program_subcommand( max_len, allow_excessive_balance, } => process_program_deploy( - &rpc_client, + rpc_client, config, program_location, *program_signer_index, @@ -657,7 +652,7 @@ pub fn process_program_subcommand( buffer_authority_signer_index, max_len, } => process_write_buffer( - &rpc_client, + rpc_client, config, program_location, *buffer_signer_index, @@ -746,7 +741,7 @@ fn get_default_program_keypair(program_location: &Option) -> Keypair { /// Deploy using upgradeable loader #[allow(clippy::too_many_arguments)] fn process_program_deploy( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, program_location: &Option, program_signer_index: Option, @@ -892,7 +887,7 @@ fn process_program_deploy( let result = if do_deploy { do_process_program_write_and_deploy( - rpc_client, + rpc_client.clone(), config, &program_data, buffer_data_len, @@ -907,7 +902,7 @@ fn process_program_deploy( ) } else { do_process_program_upgrade( - rpc_client, + rpc_client.clone(), config, &program_data, &program_pubkey, @@ -918,7 +913,7 @@ fn process_program_deploy( }; if result.is_ok() && is_final { process_set_authority( - rpc_client, + &rpc_client, config, Some(program_pubkey), None, @@ -933,7 +928,7 @@ fn process_program_deploy( } fn process_write_buffer( - rpc_client: &RpcClient, + rpc_client: Arc, config: 
&CliConfig, program_location: &str, buffer_signer_index: Option, @@ -1071,7 +1066,17 @@ fn process_set_authority( ) .map_err(|e| format!("Setting authority failed: {}", e))?; - Ok(option_pubkey_to_string("authority", new_authority).to_string()) + let authority = CliProgramAuthority { + authority: new_authority + .map(|pubkey| pubkey.to_string()) + .unwrap_or_else(|| "none".to_string()), + account_type: if program_pubkey.is_some() { + CliProgramAccountType::Program + } else { + CliProgramAccountType::Buffer + }, + }; + Ok(config.output_format.formatted_string(&authority)) } fn get_buffers( @@ -1097,6 +1102,7 @@ fn get_buffers( data_slice: Some(UiDataSliceConfig { offset: 0, length }), ..RpcAccountInfoConfig::default() }, + ..RpcProgramAccountsConfig::default() }, )?; Ok(results) @@ -1401,6 +1407,7 @@ fn process_close( data_slice: Some(UiDataSliceConfig { offset: 0, length }), ..RpcAccountInfoConfig::default() }, + ..RpcProgramAccountsConfig::default() }, )?; @@ -1440,7 +1447,7 @@ fn process_close( /// Deploy using non-upgradeable loader pub fn process_deploy( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, program_location: &str, buffer_signer_index: Option, @@ -1485,7 +1492,7 @@ pub fn process_deploy( #[allow(clippy::too_many_arguments)] fn do_process_program_write_and_deploy( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, program_data: &[u8], buffer_data_len: usize, @@ -1623,7 +1630,7 @@ fn do_process_program_write_and_deploy( messages.push(message); } - check_payer(rpc_client, config, balance_needed, &messages)?; + check_payer(&rpc_client, config, balance_needed, &messages)?; send_deploy_messages( rpc_client, @@ -1637,20 +1644,20 @@ fn do_process_program_write_and_deploy( )?; if let Some(program_signers) = program_signers { - Ok(json!({ - "programId": format!("{}", program_signers[0].pubkey()), - }) - .to_string()) + let program_id = CliProgramId { + program_id: program_signers[0].pubkey().to_string(), + }; + 
Ok(config.output_format.formatted_string(&program_id)) } else { - Ok(json!({ - "buffer": format!("{}", buffer_pubkey), - }) - .to_string()) + let buffer = CliProgramBuffer { + buffer: buffer_pubkey.to_string(), + }; + Ok(config.output_format.formatted_string(&buffer)) } } fn do_process_program_upgrade( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, program_data: &[u8], program_id: &Pubkey, @@ -1746,7 +1753,7 @@ fn do_process_program_upgrade( ); messages.push(&final_message); - check_payer(rpc_client, config, balance_needed, &messages)?; + check_payer(&rpc_client, config, balance_needed, &messages)?; send_deploy_messages( rpc_client, config, @@ -1758,10 +1765,10 @@ fn do_process_program_upgrade( Some(&[upgrade_authority]), )?; - Ok(json!({ - "programId": format!("{}", program_id), - }) - .to_string()) + let program_id = CliProgramId { + program_id: program_id.to_string(), + }; + Ok(config.output_format.formatted_string(&program_id)) } fn read_and_verify_elf(program_location: &str) -> Result, Box> { @@ -1774,7 +1781,7 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box>::from_elf( &program_data, - Some(|x| bpf_verifier::check(x, false)), + Some(|x| bpf_verifier::check(x)), Config::default(), ) .map_err(|err| format!("ELF error: {}", err))?; @@ -1851,7 +1858,7 @@ fn check_payer( } fn send_deploy_messages( - rpc_client: &RpcClient, + rpc_client: Arc, config: &CliConfig, initial_message: &Option, write_messages: &Option>, @@ -1899,7 +1906,8 @@ fn send_deploy_messages( } send_and_confirm_transactions_with_spinner( - &rpc_client, + rpc_client.clone(), + &config.websocket_url, write_transactions, &[payer_signer, write_signer], config.commitment, @@ -1967,19 +1975,9 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) { ); } -fn option_pubkey_to_string(tag: &str, option: Option) -> Value { - match option { - Some(pubkey) => json!({ - tag: format!("{:?}", pubkey), - }), - None => json!({ - tag: "none", - }), - } -} - fn 
send_and_confirm_transactions_with_spinner( - rpc_client: &RpcClient, + rpc_client: Arc, + websocket_url: &str, mut transactions: Vec, signer_keys: &T, commitment: CommitmentConfig, @@ -1987,39 +1985,19 @@ fn send_and_confirm_transactions_with_spinner( ) -> Result<(), Box> { let progress_bar = new_spinner_progress_bar(); let mut send_retries = 5; - let mut leader_schedule: Option = None; - let mut leader_schedule_epoch = 0; - let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let cluster_nodes = rpc_client.get_cluster_nodes().ok(); + progress_bar.set_message("Finding leader nodes..."); + let tpu_client = TpuClient::new( + rpc_client.clone(), + websocket_url, + TpuClientConfig::default(), + )?; loop { - progress_bar.set_message("Finding leader nodes..."); - let epoch_info = rpc_client.get_epoch_info()?; - let mut slot = epoch_info.absolute_slot; - let mut last_epoch_fetch = Instant::now(); - if epoch_info.epoch > leader_schedule_epoch || leader_schedule.is_none() { - leader_schedule = rpc_client.get_leader_schedule(Some(epoch_info.absolute_slot))?; - leader_schedule_epoch = epoch_info.epoch; - } - - let mut tpu_addresses = get_leader_tpus( - min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch), - NUM_TPU_LEADERS, - leader_schedule.as_ref(), - cluster_nodes.as_ref(), - ); - // Send all transactions let mut pending_transactions = HashMap::new(); let num_transactions = transactions.len(); for transaction in transactions { - if !tpu_addresses.is_empty() { - let wire_transaction = - serialize(&transaction).expect("serialization should succeed"); - for tpu_address in &tpu_addresses { - send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction); - } - } else { + if !tpu_client.send_transaction(&transaction) { let _result = rpc_client .send_transaction_with_config( &transaction, @@ -2039,22 +2017,11 @@ fn send_and_confirm_transactions_with_spinner( // Throttle transactions to about 100 TPS sleep(Duration::from_millis(10)); - - // Update leader 
periodically - if last_epoch_fetch.elapsed() > Duration::from_millis(400) { - let epoch_info = rpc_client.get_epoch_info()?; - last_epoch_fetch = Instant::now(); - tpu_addresses = get_leader_tpus( - min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch), - NUM_TPU_LEADERS, - leader_schedule.as_ref(), - cluster_nodes.as_ref(), - ); - } } // Collect statuses for all the transactions, drop those that are confirmed loop { + let mut slot = 0; let pending_signatures = pending_transactions.keys().cloned().collect::>(); for pending_signatures_chunk in pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS) @@ -2096,22 +2063,8 @@ fn send_and_confirm_transactions_with_spinner( break; } - let epoch_info = rpc_client.get_epoch_info()?; - tpu_addresses = get_leader_tpus( - min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch), - NUM_TPU_LEADERS, - leader_schedule.as_ref(), - cluster_nodes.as_ref(), - ); - for transaction in pending_transactions.values() { - if !tpu_addresses.is_empty() { - let wire_transaction = - serialize(&transaction).expect("serialization should succeed"); - for tpu_address in &tpu_addresses { - send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction); - } - } else { + if !tpu_client.send_transaction(transaction) { let _result = rpc_client .send_transaction_with_config( transaction, @@ -2153,6 +2106,7 @@ mod tests { use super::*; use crate::cli::{app, parse_command, process_command}; use serde_json::Value; + use solana_cli_output::OutputFormat; use solana_sdk::signature::write_keypair_file; fn make_tmp_path(name: &str) -> String { @@ -2177,19 +2131,16 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); - let test_deploy = 
test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), @@ -2206,7 +2157,7 @@ mod tests { } ); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", @@ -2215,7 +2166,7 @@ mod tests { "42", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), @@ -2235,7 +2186,7 @@ mod tests { let buffer_keypair = Keypair::new(); let buffer_keypair_file = make_tmp_path("buffer_keypair_file"); write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", @@ -2243,7 +2194,7 @@ mod tests { &buffer_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: None, @@ -2325,7 +2276,7 @@ mod tests { let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = 
test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", @@ -2334,7 +2285,7 @@ mod tests { &authority_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), @@ -2354,7 +2305,7 @@ mod tests { } ); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", @@ -2362,7 +2313,7 @@ mod tests { "--final", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), @@ -2388,20 +2339,17 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); // defaults - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), @@ -2415,7 +2363,7 @@ mod tests { ); // specify max len - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", 
"program", "write-buffer", @@ -2424,7 +2372,7 @@ mod tests { "42", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), @@ -2441,7 +2389,7 @@ mod tests { let buffer_keypair = Keypair::new(); let buffer_keypair_file = make_tmp_path("buffer_keypair_file"); write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", @@ -2450,7 +2398,7 @@ mod tests { &buffer_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), @@ -2470,7 +2418,7 @@ mod tests { let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", @@ -2479,7 +2427,7 @@ mod tests { &authority_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), @@ -2502,7 +2450,7 @@ mod tests { let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, 
&authority_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", @@ -2513,7 +2461,7 @@ mod tests { &authority_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), @@ -2539,14 +2487,11 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", @@ -2555,7 +2500,7 @@ mod tests { &new_authority_pubkey.to_string(), ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, @@ -2570,7 +2515,7 @@ mod tests { let new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", @@ -2579,7 +2524,7 @@ mod tests { &new_authority_pubkey_file, ]); assert_eq!( - 
parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, @@ -2594,7 +2539,7 @@ mod tests { let new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", @@ -2602,7 +2547,7 @@ mod tests { "--final", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, @@ -2617,7 +2562,7 @@ mod tests { let authority = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority, &authority_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", @@ -2627,7 +2572,7 @@ mod tests { "--final", ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, @@ -2650,14 +2595,11 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); let buffer_pubkey = 
Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-buffer-authority", @@ -2666,7 +2608,7 @@ mod tests { &new_authority_pubkey.to_string(), ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey, @@ -2681,7 +2623,7 @@ mod tests { let new_authority_keypair = Keypair::new(); let new_authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_keypair, &new_authority_keypair_file).unwrap(); - let test_deploy = test_commands.clone().get_matches_from(vec![ + let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-buffer-authority", @@ -2690,7 +2632,7 @@ mod tests { &new_authority_keypair_file, ]); assert_eq!( - parse_command(&test_deploy, &default_signer, &mut None).unwrap(), + parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey, @@ -2710,10 +2652,7 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file, - arg_name: "".to_string(), - }; + let default_signer = DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); @@ -2812,10 +2751,7 @@ mod tests { let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); - let default_signer = DefaultSigner { - path: keypair_file.clone(), - arg_name: "".to_string(), - }; + let default_signer = 
DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); @@ -2933,7 +2869,7 @@ mod tests { write_keypair_file(&program_pubkey, &program_keypair_location).unwrap(); let config = CliConfig { - rpc_client: Some(RpcClient::new_mock("".to_string())), + rpc_client: Some(Arc::new(RpcClient::new_mock("".to_string()))), command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location.to_str().unwrap().to_string()), buffer_signer_index: None, @@ -2946,6 +2882,7 @@ mod tests { allow_excessive_balance: false, }), signers: vec![&default_keypair], + output_format: OutputFormat::JsonCompact, ..CliConfig::default() }; diff --git a/cli/src/send_tpu.rs b/cli/src/send_tpu.rs deleted file mode 100644 index 320e88e47a..0000000000 --- a/cli/src/send_tpu.rs +++ /dev/null @@ -1,46 +0,0 @@ -use log::*; -use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule}; -use solana_sdk::clock::NUM_CONSECUTIVE_LEADER_SLOTS; -use std::net::{SocketAddr, UdpSocket}; - -pub fn get_leader_tpus( - slot_index: u64, - num_leaders: u64, - leader_schedule: Option<&RpcLeaderSchedule>, - cluster_nodes: Option<&Vec>, -) -> Vec { - let leaders: Vec<_> = (0..num_leaders) - .filter_map(|i| { - leader_schedule? - .iter() - .find(|(_pubkey, slots)| { - slots.iter().any(|slot| { - *slot as u64 == (slot_index + (i * NUM_CONSECUTIVE_LEADER_SLOTS)) - }) - }) - .and_then(|(pubkey, _)| { - cluster_nodes? 
- .iter() - .find(|contact_info| contact_info.pubkey == *pubkey) - .and_then(|contact_info| contact_info.tpu) - }) - }) - .collect(); - let mut unique_leaders = vec![]; - for leader in leaders.into_iter() { - if !unique_leaders.contains(&leader) { - unique_leaders.push(leader); - } - } - unique_leaders -} - -pub fn send_transaction_tpu( - send_socket: &UdpSocket, - tpu_address: &SocketAddr, - wire_transaction: &[u8], -) { - if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) { - warn!("Failed to send transaction to {}: {:?}", tpu_address, err); - } -} diff --git a/cli/src/stake.rs b/cli/src/stake.rs index f1dc835727..d02adea97c 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -4,15 +4,17 @@ use crate::{ log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, + memo::WithMemo, nonce::check_nonce_account, spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount}, }; -use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand}; +use clap::{value_t, App, Arg, ArgGroup, ArgMatches, SubCommand}; use solana_clap_utils::{ fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, + memo::MEMO_ARG, nonce::*, offline::*, ArgConstant, @@ -108,7 +110,7 @@ impl StakeSubCommands for App<'_, '_> { .arg( Arg::with_name("stake_account") .index(1) - .value_name("ACCOUNT_KEYPAIR") + .value_name("STAKE_ACCOUNT_KEYPAIR") .takes_value(true) .required(true) .validator(is_valid_signer) @@ -134,7 +136,8 @@ impl StakeSubCommands for App<'_, '_> { .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the stake_account pubkey") + .help("Seed for address generation; if specified, the resulting account \ + will be at a derived address of the STAKE_ACCOUNT_KEYPAIR pubkey") ) .arg( Arg::with_name("lockup_epoch") @@ -247,7 +250,15 @@ impl StakeSubCommands for 
App<'_, '_> { .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account to be deactivated. ") + "Stake account to be deactivated (or base of derived address if --seed is used). ") + ) + .arg( + Arg::with_name("seed") + .long("seed") + .value_name("STRING") + .takes_value(true) + .help("Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS") ) .arg(stake_authority_arg()) .offline_args() @@ -287,7 +298,8 @@ impl StakeSubCommands for App<'_, '_> { .long("seed") .value_name("STRING") .takes_value(true) - .help("Seed for address generation; if specified, the resulting account will be at a derived address of the SPLIT_STAKE_ACCOUNT pubkey") + .help("Seed for address generation; if specified, the resulting account \ + will be at a derived address of SPLIT_STAKE_ACCOUNT") ) .arg(stake_authority_arg()) .offline_args() @@ -309,7 +321,8 @@ impl StakeSubCommands for App<'_, '_> { .index(2) .value_name("SOURCE_STAKE_ACCOUNT_ADDRESS") .required(true), - "Source stake account for the merge. If successful, this stake account will no longer exist after the merge") + "Source stake account for the merge. If successful, this stake account \ + will no longer exist after the merge") ) .arg(stake_authority_arg()) .offline_args() @@ -324,7 +337,7 @@ impl StakeSubCommands for App<'_, '_> { .index(1) .value_name("STAKE_ACCOUNT_ADDRESS") .required(true), - "Stake account from which to withdraw") + "Stake account from which to withdraw (or base of derived address if --seed is used). 
") ) .arg( pubkey!(Arg::with_name("destination_account_pubkey") @@ -338,9 +351,17 @@ impl StakeSubCommands for App<'_, '_> { .index(3) .value_name("AMOUNT") .takes_value(true) - .validator(is_amount) + .validator(is_amount_or_all) .required(true) - .help("The amount to withdraw from the stake account, in VLX") + .help("The amount to withdraw from the stake account, in VLX; accepts keyword ALL") + ) + .arg( + Arg::with_name("seed") + .long("seed") + .value_name("STRING") + .takes_value(true) + .help("Seed for address generation; if specified, the resulting account \ + will be at a derived address of STAKE_ACCOUNT_ADDRESS") ) .arg(withdraw_authority_arg()) .offline_args() @@ -452,11 +473,24 @@ impl StakeSubCommands for App<'_, '_> { }) .help("Display NUM recent epochs worth of stake history in text mode. 0 for all") ) + .arg( + Arg::with_name("limit") + .long("limit") + .takes_value(true) + .value_name("NUM") + .default_value("10") + .validator(|s| { + s.parse::() + .map(|_| ()) + .map_err(|e| e.to_string()) + }) + .help("Display NUM recent epochs worth of stake history in text mode. 
0 for all") + ) ) } } -pub fn parse_stake_create_account( +pub fn parse_create_stake_account( matches: &ArgMatches<'_>, default_signer: &DefaultSigner, wallet_manager: &mut Option>, @@ -472,6 +506,7 @@ pub fn parse_stake_create_account( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (nonce_authority, nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; @@ -503,6 +538,7 @@ pub fn parse_stake_create_account( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), from: signer_info.index_of(from_pubkey).unwrap(), }, @@ -524,6 +560,7 @@ pub fn parse_stake_delegate_stake( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (stake_authority, stake_authority_pubkey) = signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -548,6 +585,7 @@ pub fn parse_stake_delegate_stake( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, @@ -600,6 +638,7 @@ pub fn parse_stake_authorize( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, 
NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (nonce_authority, nonce_authority_pubkey) = signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; @@ -637,6 +676,7 @@ pub fn parse_stake_authorize( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)), }, @@ -660,6 +700,7 @@ pub fn parse_split_stake( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (stake_authority, stake_authority_pubkey) = signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -682,6 +723,7 @@ pub fn parse_split_stake( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, split_stake_account: signer_info.index_of(split_stake_account_pubkey).unwrap(), seed, lamports, @@ -705,6 +747,7 @@ pub fn parse_merge_stake( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (stake_authority, stake_authority_pubkey) = signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -728,6 +771,7 @@ pub fn parse_merge_stake( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: 
signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, @@ -745,6 +789,8 @@ pub fn parse_stake_deactivate_stake( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let seed = value_t!(matches, "seed", String).ok(); let (stake_authority, stake_authority_pubkey) = signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -767,6 +813,8 @@ pub fn parse_stake_deactivate_stake( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, + seed, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, @@ -782,11 +830,13 @@ pub fn parse_stake_withdraw_stake( pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let destination_account_pubkey = pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap(); - let lamports = lamports_of_sol(matches, "amount").unwrap(); + let amount = SpendAmount::new_from_matches(matches, "amount"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let seed = value_t!(matches, "seed", String).ok(); let (withdraw_authority, withdraw_authority_pubkey) = signer_of(matches, WITHDRAW_AUTHORITY_ARG.name, wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -808,13 +858,15 @@ pub fn parse_stake_withdraw_stake( command: CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey, - lamports, + amount, withdraw_authority: 
signer_info.index_of(withdraw_authority_pubkey).unwrap(), sign_only, dump_transaction_message, blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, + seed, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)), }, @@ -837,6 +889,7 @@ pub fn parse_stake_set_lockup( let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); let blockhash_query = BlockhashQuery::new_from_matches(matches); let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let (custodian, custodian_pubkey) = signer_of(matches, "custodian", wallet_manager)?; let (nonce_authority, nonce_authority_pubkey) = @@ -864,6 +917,7 @@ pub fn parse_stake_set_lockup( blockhash_query, nonce_account, nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), + memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, @@ -919,6 +973,7 @@ pub fn process_create_stake_account( blockhash_query: &BlockhashQuery, nonce_account: Option<&Pubkey>, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, from: SignerIndex, ) -> ProcessResult { @@ -953,6 +1008,7 @@ pub fn process_create_stake_account( lockup, lamports, ) + .with_memo(memo) } else { stake_instruction::create_account( &from.pubkey(), @@ -961,6 +1017,7 @@ pub fn process_create_stake_account( lockup, lamports, ) + .with_memo(memo) }; if let Some(nonce_account) = &nonce_account { Message::new_with_nonce( @@ -1063,6 +1120,7 @@ pub fn process_stake_authorize( blockhash_query: &BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { let mut ixs = Vec::new(); @@ -1081,6 +1139,7 @@ pub fn process_stake_authorize( custodian.map(|signer| signer.pubkey()).as_ref(), )); } + ixs = 
ixs.with_memo(memo); let (recent_blockhash, fee_calculator) = blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; @@ -1142,15 +1201,25 @@ pub fn process_deactivate_stake_account( blockhash_query: &BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, + seed: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let stake_authority = config.signers[stake_authority]; + + let stake_account_address = if let Some(seed) = seed { + Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())? + } else { + *stake_account_pubkey + }; + let ixs = vec![stake_instruction::deactivate_stake( - stake_account_pubkey, + &stake_account_address, &stake_authority.pubkey(), - )]; + )] + .with_memo(memo); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -1203,7 +1272,7 @@ pub fn process_withdraw_stake( config: &CliConfig, stake_account_pubkey: &Pubkey, destination_account_pubkey: &Pubkey, - lamports: u64, + amount: SpendAmount, withdraw_authority: SignerIndex, custodian: Option, sign_only: bool, @@ -1211,34 +1280,58 @@ pub fn process_withdraw_stake( blockhash_query: &BlockhashQuery, nonce_account: Option<&Pubkey>, nonce_authority: SignerIndex, + memo: Option<&String>, + seed: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { - let (recent_blockhash, fee_calculator) = - blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let withdraw_authority = config.signers[withdraw_authority]; let custodian = custodian.map(|index| config.signers[index]); - let ixs = vec![stake_instruction::withdraw( - stake_account_pubkey, - &withdraw_authority.pubkey(), - destination_account_pubkey, - lamports, - custodian.map(|signer| signer.pubkey()).as_ref(), - )]; + let stake_account_address = if let 
Some(seed) = seed { + Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())? + } else { + *stake_account_pubkey + }; + + let (recent_blockhash, fee_calculator) = + blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - let message = if let Some(nonce_account) = &nonce_account { - Message::new_with_nonce( - ixs, - Some(&fee_payer.pubkey()), - nonce_account, - &nonce_authority.pubkey(), - ) - } else { - Message::new(&ixs, Some(&fee_payer.pubkey())) + let build_message = |lamports| { + let ixs = vec![stake_instruction::withdraw( + &stake_account_address, + &withdraw_authority.pubkey(), + destination_account_pubkey, + lamports, + custodian.map(|signer| signer.pubkey()).as_ref(), + )] + .with_memo(memo); + + if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) + } }; + + let (message, _) = resolve_spend_tx_and_check_account_balances( + rpc_client, + sign_only, + amount, + &fee_calculator, + &stake_account_address, + &fee_payer.pubkey(), + build_message, + config.commitment, + )?; + let mut tx = Transaction::new_unsigned(message); if sign_only { @@ -1283,6 +1376,7 @@ pub fn process_split_stake( blockhash_query: &BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, split_stake_account: SignerIndex, split_stake_account_seed: &Option, lamports: u64, @@ -1376,6 +1470,7 @@ pub fn process_split_stake( &split_stake_account.pubkey(), seed, ) + .with_memo(memo) } else { stake_instruction::split( &stake_account_pubkey, @@ -1383,6 +1478,7 @@ pub fn process_split_stake( lamports, &split_stake_account_address, ) + .with_memo(memo) }; let nonce_authority = config.signers[nonce_authority]; @@ -1442,6 +1538,7 @@ pub fn 
process_merge_stake( blockhash_query: &BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { let fee_payer = config.signers[fee_payer]; @@ -1488,7 +1585,8 @@ pub fn process_merge_stake( &stake_account_pubkey, &source_stake_account_pubkey, &stake_authority.pubkey(), - ); + ) + .with_memo(memo); let nonce_authority = config.signers[nonce_authority]; @@ -1551,6 +1649,7 @@ pub fn process_stake_set_lockup( blockhash_query: &BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = @@ -1561,7 +1660,8 @@ pub fn process_stake_set_lockup( stake_account_pubkey, lockup, &custodian.pubkey(), - )]; + )] + .with_memo(memo); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -1744,7 +1844,7 @@ pub fn make_cli_reward( let rate_change = reward.amount as f64 / (reward.post_balance - reward.amount) as f64; let wallclock_epochs_per_year = - (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration as f64; + (SECONDS_PER_DAY * 365) as f64 / wallclock_epoch_duration as f64; let apr = rate_change * wallclock_epochs_per_year; Some(CliEpochReward { @@ -1859,9 +1959,10 @@ pub fn process_show_stake_history( limit_results: usize, ) -> ProcessResult { let stake_history_account = rpc_client.get_account(&stake_history::id())?; - let stake_history = from_account::(&stake_history_account).ok_or_else(|| { - CliError::RpcRequestError("Failed to deserialize stake history".to_string()) - })?; + let stake_history = + from_account::(&stake_history_account).ok_or_else(|| { + CliError::RpcRequestError("Failed to deserialize stake history".to_string()) + })?; let limit_results = match config.output_format { OutputFormat::Json | OutputFormat::JsonCompact => std::usize::MAX, @@ -1897,6 +1998,7 @@ pub fn process_delegate_stake( blockhash_query: 
&BlockhashQuery, nonce_account: Option, nonce_authority: SignerIndex, + memo: Option<&String>, fee_payer: SignerIndex, ) -> ProcessResult { check_unique_pubkeys( @@ -1960,7 +2062,8 @@ pub fn process_delegate_stake( stake_account_pubkey, &stake_authority.pubkey(), vote_account_pubkey, - )]; + )] + .with_memo(memo); let nonce_authority = config.signers[nonce_authority]; let fee_payer = config.signers[fee_payer]; @@ -2041,10 +2144,7 @@ mod tests { let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner { - path: default_keypair_file.clone(), - arg_name: String::new(), - }; + let default_signer = DefaultSigner::new("", &default_keypair_file); let (keypair_file, mut tmp_file) = make_tmp_file(); let stake_account_keypair = Keypair::new(); write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap(); @@ -2085,6 +2185,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2121,6 +2222,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2161,6 +2263,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2190,6 +2293,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2216,6 +2320,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2248,6 +2353,7 @@ mod tests { blockhash_query: 
BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2281,6 +2387,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2311,6 +2418,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2347,6 +2455,7 @@ mod tests { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2385,6 +2494,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, custodian: None, }, @@ -2433,6 +2543,7 @@ mod tests { ), nonce_account: Some(nonce_account), nonce_authority: 2, + memo: None, fee_payer: 1, custodian: None, }, @@ -2467,6 +2578,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }, @@ -2506,6 +2618,7 @@ mod tests { ), nonce_account: Some(nonce_account_pubkey), nonce_authority: 1, + memo: None, fee_payer: 0, custodian: None, }, @@ -2541,6 +2654,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, custodian: None, }, @@ -2580,6 +2694,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, custodian: None, }, @@ -2628,6 +2743,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }, @@ -2666,6 +2782,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }, @@ -2723,6 +2840,7 @@ mod tests { ), nonce_account: 
Some(nonce_account), nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }, @@ -2755,6 +2873,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -2785,6 +2904,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![ @@ -2817,6 +2937,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -2850,6 +2971,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -2878,6 +3000,7 @@ mod tests { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -2916,6 +3039,7 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, }, signers: vec![ @@ -2963,6 +3087,7 @@ mod tests { ), nonce_account: Some(nonce_account), nonce_authority: 2, + memo: None, fee_payer: 1, }, signers: vec![ @@ -2998,6 +3123,7 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, }, signers: vec![ @@ -3022,7 +3148,7 @@ mod tests { command: CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey: stake_account_pubkey, - lamports: 42_000_000_000, + amount: SpendAmount::Some(42_000_000_000), withdraw_authority: 0, custodian: None, sign_only: false, @@ -3030,6 +3156,8 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, + 
seed: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -3053,7 +3181,7 @@ mod tests { command: CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey: stake_account_pubkey, - lamports: 42_000_000_000, + amount: SpendAmount::Some(42_000_000_000), withdraw_authority: 1, custodian: None, sign_only: false, @@ -3061,6 +3189,8 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }, signers: vec![ @@ -3089,7 +3219,7 @@ mod tests { command: CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey: stake_account_pubkey, - lamports: 42_000_000_000, + amount: SpendAmount::Some(42_000_000_000), withdraw_authority: 0, custodian: Some(1), sign_only: false, @@ -3097,6 +3227,8 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }, signers: vec![ @@ -3133,7 +3265,7 @@ mod tests { command: CliCommand::WithdrawStake { stake_account_pubkey, destination_account_pubkey: stake_account_pubkey, - lamports: 42_000_000_000, + amount: SpendAmount::Some(42_000_000_000), withdraw_authority: 0, custodian: None, sign_only: false, @@ -3144,6 +3276,8 @@ mod tests { ), nonce_account: Some(nonce_account), nonce_authority: 1, + memo: None, + seed: None, fee_payer: 1, }, signers: vec![ @@ -3172,6 +3306,8 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -3197,6 +3333,8 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }, signers: vec![ @@ -3232,6 +3370,8 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: 
None, + seed: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -3257,6 +3397,8 @@ mod tests { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], @@ -3292,6 +3434,8 @@ mod tests { ), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 1, }, signers: vec![ @@ -3336,6 +3480,8 @@ mod tests { ), nonce_account: Some(nonce_account), nonce_authority: 2, + memo: None, + seed: None, fee_payer: 1, }, signers: vec![ @@ -3365,6 +3511,8 @@ mod tests { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 1, }, signers: vec![ @@ -3400,6 +3548,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, split_stake_account: 1, seed: None, lamports: 50_000_000_000, @@ -3465,6 +3614,7 @@ mod tests { ), nonce_account: Some(nonce_account), nonce_authority: 1, + memo: None, split_stake_account: 2, seed: None, lamports: 50_000_000_000, @@ -3504,6 +3654,7 @@ mod tests { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),], diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 4e0353fd7f..67a3e11b61 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -4,6 +4,7 @@ use crate::{ log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, + memo::WithMemo, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }; use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}; @@ -11,6 +12,7 @@ use solana_clap_utils::{ input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, + memo::{memo_arg, MEMO_ARG}, 
}; use solana_cli_output::{CliEpochVotingHistory, CliLockout, CliVoteAccount}; use solana_client::rpc_client::RpcClient; @@ -79,7 +81,8 @@ impl VoteSubCommands for App<'_, '_> { .value_name("STRING") .takes_value(true) .help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey") - ), + ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-authorize-voter") @@ -105,7 +108,8 @@ impl VoteSubCommands for App<'_, '_> { .value_name("NEW_AUTHORIZED_PUBKEY") .required(true), "New authorized vote signer. "), - ), + ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-authorize-withdrawer") @@ -131,7 +135,8 @@ impl VoteSubCommands for App<'_, '_> { .value_name("AUTHORIZED_PUBKEY") .required(true), "New authorized withdrawer. "), - ), + ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-update-validator") @@ -161,6 +166,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-update-commission") @@ -190,6 +196,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) + .arg(memo_arg()) ) .subcommand( SubCommand::with_name("vote-account") @@ -259,6 +266,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer [default: cli config keypair]"), ) + .arg(memo_arg()) ) } } @@ -275,6 +283,7 @@ pub fn parse_create_vote_account( let commission = value_t_or_exit!(matches, "commission", u8); let authorized_voter = pubkey_of_signer(matches, "authorized_voter", wallet_manager)?; let authorized_withdrawer = pubkey_of_signer(matches, "authorized_withdrawer", wallet_manager)?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); let payer_provided = None; let signer_info = default_signer.generate_unique_signers( @@ -291,6 +300,7 @@ pub fn parse_create_vote_account( 
authorized_voter, authorized_withdrawer, commission, + memo, }, signers: signer_info.signers, }) @@ -314,12 +324,14 @@ pub fn parse_vote_authorize( matches, wallet_manager, )?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); Ok(CliCommandInfo { command: CliCommand::VoteAuthorize { vote_account_pubkey, new_authorized_pubkey, vote_authorize, + memo, }, signers: signer_info.signers, }) @@ -343,12 +355,14 @@ pub fn parse_vote_update_validator( matches, wallet_manager, )?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); Ok(CliCommandInfo { command: CliCommand::VoteUpdateValidator { vote_account_pubkey, new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(), withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(), + memo, }, signers: signer_info.signers, }) @@ -371,12 +385,14 @@ pub fn parse_vote_update_commission( matches, wallet_manager, )?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); Ok(CliCommandInfo { command: CliCommand::VoteUpdateCommission { vote_account_pubkey, commission, withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(), + memo, }, signers: signer_info.signers, }) @@ -424,6 +440,7 @@ pub fn parse_withdraw_from_vote_account( matches, wallet_manager, )?; + let memo = matches.value_of(MEMO_ARG.name).map(String::from); Ok(CliCommandInfo { command: CliCommand::WithdrawFromVoteAccount { @@ -431,6 +448,7 @@ pub fn parse_withdraw_from_vote_account( destination_account_pubkey, withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(), withdraw_amount, + memo, }, signers: signer_info.signers, }) @@ -445,6 +463,7 @@ pub fn process_create_vote_account( authorized_voter: &Option, authorized_withdrawer: &Option, commission: u8, + memo: Option<&String>, ) -> ProcessResult { let vote_account = config.signers[vote_account]; let vote_account_pubkey = vote_account.pubkey(); @@ -487,6 +506,7 @@ pub fn process_create_vote_account( 
&vote_init, lamports, ) + .with_memo(memo) } else { vote_instruction::create_account( &config.signers[0].pubkey(), @@ -494,6 +514,7 @@ pub fn process_create_vote_account( &vote_init, lamports, ) + .with_memo(memo) }; Message::new(&ixs, Some(&config.signers[0].pubkey())) }; @@ -537,6 +558,7 @@ pub fn process_vote_authorize( vote_account_pubkey: &Pubkey, new_authorized_pubkey: &Pubkey, vote_authorize: VoteAuthorize, + memo: Option<&String>, ) -> ProcessResult { // If the `authorized_account` is also the fee payer, `config.signers` will only have one // keypair in it @@ -556,7 +578,8 @@ pub fn process_vote_authorize( &authorized.pubkey(), // current authorized new_authorized_pubkey, // new vote signer/withdrawer vote_authorize, // vote or withdraw - )]; + )] + .with_memo(memo); let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = Transaction::new_unsigned(message); @@ -578,6 +601,7 @@ pub fn process_vote_update_validator( vote_account_pubkey: &Pubkey, new_identity_account: SignerIndex, withdraw_authority: SignerIndex, + memo: Option<&String>, ) -> ProcessResult { let authorized_withdrawer = config.signers[withdraw_authority]; let new_identity_account = config.signers[new_identity_account]; @@ -591,7 +615,8 @@ pub fn process_vote_update_validator( vote_account_pubkey, &authorized_withdrawer.pubkey(), &new_identity_pubkey, - )]; + )] + .with_memo(memo); let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = Transaction::new_unsigned(message); @@ -613,6 +638,7 @@ pub fn process_vote_update_commission( vote_account_pubkey: &Pubkey, commission: u8, withdraw_authority: SignerIndex, + memo: Option<&String>, ) -> ProcessResult { let authorized_withdrawer = config.signers[withdraw_authority]; let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; @@ -620,7 +646,8 @@ pub fn process_vote_update_commission( vote_account_pubkey, &authorized_withdrawer.pubkey(), commission, - )]; + )] + .with_memo(memo); 
let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut tx = Transaction::new_unsigned(message); @@ -731,6 +758,7 @@ pub fn process_withdraw_from_vote_account( withdraw_authority: SignerIndex, withdraw_amount: SpendAmount, destination_account_pubkey: &Pubkey, + memo: Option<&String>, ) -> ProcessResult { let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let withdraw_authority = config.signers[withdraw_authority]; @@ -751,14 +779,15 @@ pub fn process_withdraw_from_vote_account( } }; - let ix = withdraw( + let ixs = vec![withdraw( vote_account_pubkey, &withdraw_authority.pubkey(), lamports, destination_account_pubkey, - ); + )] + .with_memo(memo); - let message = Message::new(&[ix], Some(&config.signers[0].pubkey())); + let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); let mut transaction = Transaction::new_unsigned(message); transaction.try_sign(&config.signers, recent_blockhash)?; check_account_for_fee_with_commitment( @@ -797,10 +826,7 @@ mod tests { let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); - let default_signer = DefaultSigner { - path: default_keypair_file.clone(), - arg_name: String::new(), - }; + let default_signer = DefaultSigner::new("", &default_keypair_file); let test_authorize_voter = test_commands.clone().get_matches_from(vec![ "test", @@ -815,7 +841,8 @@ mod tests { command: CliCommand::VoteAuthorize { vote_account_pubkey: pubkey, new_authorized_pubkey: pubkey2, - vote_authorize: VoteAuthorize::Voter + vote_authorize: VoteAuthorize::Voter, + memo: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -838,7 +865,8 @@ mod tests { command: CliCommand::VoteAuthorize { vote_account_pubkey: pubkey, new_authorized_pubkey: pubkey2, - vote_authorize: VoteAuthorize::Voter + vote_authorize: VoteAuthorize::Voter, + memo: None, }, signers: vec![ 
read_keypair_file(&default_keypair_file).unwrap().into(), @@ -872,6 +900,7 @@ mod tests { authorized_voter: None, authorized_withdrawer: None, commission: 10, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -901,6 +930,7 @@ mod tests { authorized_voter: None, authorized_withdrawer: None, commission: 100, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -933,7 +963,8 @@ mod tests { identity_account: 2, authorized_voter: Some(authed), authorized_withdrawer: None, - commission: 100 + commission: 100, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -964,7 +995,8 @@ mod tests { identity_account: 2, authorized_voter: None, authorized_withdrawer: Some(authed), - commission: 100 + commission: 100, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -988,6 +1020,7 @@ mod tests { vote_account_pubkey: pubkey, new_identity_account: 2, withdraw_authority: 1, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1011,6 +1044,7 @@ mod tests { vote_account_pubkey: pubkey, commission: 42, withdraw_authority: 1, + memo: None, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1035,6 +1069,7 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 0, withdraw_amount: SpendAmount::Some(42_000_000_000), + memo: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -1056,6 +1091,7 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 0, withdraw_amount: SpendAmount::All, + memo: None, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -1082,6 +1118,7 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 1, withdraw_amount: SpendAmount::Some(42_000_000_000), + memo: None, }, signers: vec![ 
read_keypair_file(&default_keypair_file).unwrap().into(), diff --git a/cli/tests/nonce.rs b/cli/tests/nonce.rs index b73215ccb6..2e53993421 100644 --- a/cli/tests/nonce.rs +++ b/cli/tests/nonce.rs @@ -101,6 +101,7 @@ fn full_battery_tests( nonce_account: 1, seed, nonce_authority: optional_authority, + memo: None, amount: SpendAmount::Some(1000), }; @@ -134,6 +135,7 @@ fn full_battery_tests( config_payer.command = CliCommand::NewNonce { nonce_account, nonce_authority: index, + memo: None, }; process_command(&config_payer).unwrap(); @@ -151,6 +153,7 @@ fn full_battery_tests( config_payer.command = CliCommand::WithdrawFromNonceAccount { nonce_account, nonce_authority: index, + memo: None, destination_account_pubkey: payee_pubkey, lamports: 100, }; @@ -171,6 +174,7 @@ fn full_battery_tests( config_payer.command = CliCommand::AuthorizeNonceAccount { nonce_account, nonce_authority: index, + memo: None, new_authority: new_authority.pubkey(), }; process_command(&config_payer).unwrap(); @@ -179,6 +183,7 @@ fn full_battery_tests( config_payer.command = CliCommand::NewNonce { nonce_account, nonce_authority: index, + memo: None, }; process_command(&config_payer).unwrap_err(); @@ -187,6 +192,7 @@ fn full_battery_tests( config_payer.command = CliCommand::NewNonce { nonce_account, nonce_authority: 1, + memo: None, }; process_command(&config_payer).unwrap(); @@ -194,6 +200,7 @@ fn full_battery_tests( config_payer.command = CliCommand::WithdrawFromNonceAccount { nonce_account, nonce_authority: 1, + memo: None, destination_account_pubkey: payee_pubkey, lamports: 100, }; @@ -253,6 +260,7 @@ fn test_create_account_with_seed() { nonce_account: 0, seed: Some(seed), nonce_authority: Some(authority_pubkey), + memo: None, amount: SpendAmount::Some(241), }; process_command(&creator_config).unwrap(); @@ -284,10 +292,12 @@ fn test_create_account_with_seed() { from: 0, sign_only: true, dump_transaction_message: true, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: 
BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_address), nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -308,6 +318,7 @@ fn test_create_account_with_seed() { from: 0, sign_only: false, dump_transaction_message: true, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_address), @@ -315,6 +326,7 @@ fn test_create_account_with_seed() { ), nonce_account: Some(nonce_address), nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 5cc6bce0dd..15d7e6ae83 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -58,6 +58,7 @@ fn test_cli_program_deploy_non_upgradeable() { use_deprecated_loader: false, allow_excessive_balance: false, }; + config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); let program_id_str = json @@ -187,6 +188,7 @@ fn test_cli_program_deploy_no_authority() { is_final: true, max_len: None, }); + config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); let program_id_str = json @@ -888,6 +890,7 @@ fn test_cli_program_set_buffer_authority() { buffer_authority_index: Some(0), new_buffer_authority: new_buffer_authority.pubkey(), }); + config.output_format = OutputFormat::JsonCompact; let response = process_command(&config); let json: Value = serde_json::from_str(&response.unwrap()).unwrap(); let new_buffer_authority_str = json diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index b80bf03f15..fa0e71edb6 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -20,7 +20,7 @@ use solana_sdk::{ }; use solana_stake_program::{ stake_instruction::LockupArgs, - 
stake_state::{Lockup, StakeAuthorize, StakeState, MIN_DELEGATE_STAKE_AMOUNT}, + stake_state::{Lockup, StakeAuthorize, StakeState}, }; #[test] @@ -38,13 +38,8 @@ fn test_stake_delegation_force() { config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - MIN_DELEGATE_STAKE_AMOUNT + 400 + 50_000, - ) - .unwrap(); + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) + .unwrap(); // Create vote account let vote_keypair = Keypair::new(); @@ -56,6 +51,7 @@ fn test_stake_delegation_force() { authorized_voter: None, authorized_withdrawer: None, commission: 0, + memo: None, }; process_command(&config).unwrap(); @@ -68,12 +64,13 @@ fn test_stake_delegation_force() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -91,6 +88,7 @@ fn test_stake_delegation_force() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap_err(); @@ -106,6 +104,7 @@ fn test_stake_delegation_force() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -132,14 +131,10 @@ fn test_seed_stake_delegation_and_deactivation() { &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), - MIN_DELEGATE_STAKE_AMOUNT + 1_000, + 100_000, ) .unwrap(); - check_recent_balance( - MIN_DELEGATE_STAKE_AMOUNT + 1_000, - &rpc_client, - &config_validator.signers[0].pubkey(), - ); + check_recent_balance(100_000, &rpc_client, 
&config_validator.signers[0].pubkey()); let stake_address = Pubkey::create_with_seed( &config_validator.signers[0].pubkey(), @@ -156,12 +151,13 @@ fn test_seed_stake_delegation_and_deactivation() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -178,6 +174,7 @@ fn test_seed_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); @@ -191,6 +188,8 @@ fn test_seed_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); @@ -219,14 +218,10 @@ fn test_stake_delegation_and_deactivation() { &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), - MIN_DELEGATE_STAKE_AMOUNT + 1_000, + 100_000, ) .unwrap(); - check_recent_balance( - MIN_DELEGATE_STAKE_AMOUNT + 1_000, - &rpc_client, - &config_validator.signers[0].pubkey(), - ); + check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey()); // Create stake account config_validator.signers.push(&stake_keypair); @@ -236,12 +231,13 @@ fn test_stake_delegation_and_deactivation() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -259,6 +255,7 @@ fn 
test_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); @@ -272,6 +269,8 @@ fn test_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config_validator).unwrap(); @@ -281,7 +280,6 @@ fn test_stake_delegation_and_deactivation() { fn test_offline_stake_delegation_and_deactivation() { solana_logger::setup(); - let aproximate_min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -312,27 +310,19 @@ fn test_offline_stake_delegation_and_deactivation() { &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), - aproximate_min_stake * 2, + 100_000, ) .unwrap(); - check_recent_balance( - aproximate_min_stake * 2, - &rpc_client, - &config_validator.signers[0].pubkey(), - ); + check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey()); request_and_confirm_airdrop( &rpc_client, &config_offline, &config_offline.signers[0].pubkey(), - aproximate_min_stake * 2, + 100_000, ) .unwrap(); - check_recent_balance( - aproximate_min_stake * 2, - &rpc_client, - &config_offline.signers[0].pubkey(), - ); + check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey()); // Create stake account config_validator.signers.push(&stake_keypair); @@ -342,12 +332,13 @@ fn test_offline_stake_delegation_and_deactivation() { staker: Some(config_offline.signers[0].pubkey()), withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(aproximate_min_stake), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, 
nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -365,6 +356,7 @@ fn test_offline_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; @@ -385,6 +377,7 @@ fn test_offline_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config_payer).unwrap(); @@ -399,6 +392,8 @@ fn test_offline_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; let sig_response = process_command(&config_offline).unwrap(); @@ -416,6 +411,8 @@ fn test_offline_stake_delegation_and_deactivation() { blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config_payer).unwrap(); @@ -425,7 +422,6 @@ fn test_offline_stake_delegation_and_deactivation() { fn test_nonced_stake_delegation_and_deactivation() { solana_logger::setup(); - let aproximate_min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -443,13 +439,8 @@ fn test_nonced_stake_delegation_and_deactivation() { .get_minimum_balance_for_rent_exemption(NonceState::size()) .unwrap(); - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - aproximate_min_stake * 2, - ) - .unwrap(); + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) + .unwrap(); // Create stake account let stake_keypair = Keypair::new(); @@ -460,13 +451,13 @@ fn 
test_nonced_stake_delegation_and_deactivation() { staker: None, withdrawer: None, lockup: Lockup::default(), - // Minumum plus add some lamports for rent - amount: SpendAmount::Some(aproximate_min_stake), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -479,6 +470,7 @@ fn test_nonced_stake_delegation_and_deactivation() { nonce_account: 1, seed: None, nonce_authority: Some(config.signers[0].pubkey()), + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -508,6 +500,7 @@ fn test_nonced_stake_delegation_and_deactivation() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -534,6 +527,8 @@ fn test_nonced_stake_delegation_and_deactivation() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -556,13 +551,8 @@ fn test_stake_authorize() { config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - MIN_DELEGATE_STAKE_AMOUNT + 1_000, - ) - .unwrap(); + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000) + .unwrap(); let offline_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let mut config_offline = CliConfig::recent_for_tests(); @@ -577,7 +567,7 @@ fn test_stake_authorize() { &rpc_client, &config_offline, &config_offline.signers[0].pubkey(), - MIN_DELEGATE_STAKE_AMOUNT + 1_000, + 100_000, ) .unwrap(); @@ -591,12 +581,13 @@ fn test_stake_authorize() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: 
SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -614,6 +605,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -643,6 +635,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -667,6 +660,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -691,6 +685,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -708,6 +703,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -730,6 +726,7 @@ fn test_stake_authorize() { nonce_account: 1, seed: None, nonce_authority: Some(offline_authority_pubkey), + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -756,6 +753,7 @@ fn test_stake_authorize() { blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -777,6 +775,7 @@ fn test_stake_authorize() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -834,24 +833,14 @@ fn test_stake_authorize_with_fee_payer() { config_offline.command = CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); - request_and_confirm_airdrop( - 
&rpc_client, - &config, - &default_pubkey, - MIN_DELEGATE_STAKE_AMOUNT + 10_000, - ) - .unwrap(); - check_recent_balance( - MIN_DELEGATE_STAKE_AMOUNT + 10_000, - &rpc_client, - &config.signers[0].pubkey(), - ); + request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap(); + check_recent_balance(100_000, &rpc_client, &config.signers[0].pubkey()); - request_and_confirm_airdrop(&rpc_client, &config, &payer_pubkey, 50_000).unwrap(); - check_recent_balance(50_000, &rpc_client, &payer_pubkey); + request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap(); + check_recent_balance(100_000, &rpc_client, &payer_pubkey); - request_and_confirm_airdrop(&rpc_client, &config, &offline_pubkey, 50_000).unwrap(); - check_recent_balance(50_000, &rpc_client, &offline_pubkey); + request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); + check_recent_balance(100_000, &rpc_client, &offline_pubkey); check_ready(&rpc_client); @@ -865,22 +854,19 @@ fn test_stake_authorize_with_fee_payer() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); - // `config` balance should be 10_000 - 1 stake account sig - 1 fee sig - rent exempt (400) - check_recent_balance( - 10_000 - SIG_FEE - SIG_FEE - 400, - &rpc_client, - &default_pubkey, - ); + // `config` balance should be 50,000 - 1 stake account sig - 1 fee sig + check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // Assign authority with separate fee payer config.signers = vec![&default_signer, &payer_keypair]; @@ -892,15 +878,16 @@ fn test_stake_authorize_with_fee_payer() { blockhash_query: 
BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 1, custodian: None, }; process_command(&config).unwrap(); // `config` balance has not changed, despite submitting the TX - check_recent_balance(10_000 - 2 * SIG_FEE - 400, &rpc_client, &default_pubkey); + check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // `config_payer` however has paid `config`'s authority sig // and `config_payer`'s fee sig - check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey); + check_recent_balance(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey); // Assign authority with offline fee payer let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap(); @@ -912,6 +899,7 @@ fn test_stake_authorize_with_fee_payer() { blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; @@ -929,22 +917,22 @@ fn test_stake_authorize_with_fee_payer() { blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, custodian: None, }; process_command(&config).unwrap(); // `config`'s balance again has not changed - check_recent_balance(10_000 - 2 * SIG_FEE - 400, &rpc_client, &default_pubkey); + check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey); // `config_offline` however has paid 1 sig due to being both authority // and fee payer - check_recent_balance(50_000 - SIG_FEE, &rpc_client, &offline_pubkey); + check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey); } #[test] fn test_stake_split() { solana_logger::setup(); - const SIG_FEE: u64 = 42; let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -967,43 +955,17 @@ fn test_stake_split() { config_offline.command = 
CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); - // Create stake account, identity is authority - let minimum_stake_balance = MIN_DELEGATE_STAKE_AMOUNT - + rpc_client - .get_minimum_balance_for_rent_exemption(std::mem::size_of::()) - .unwrap(); - - // Create nonce account - let minimum_nonce_balance = rpc_client - .get_minimum_balance_for_rent_exemption(NonceState::size()) + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000) .unwrap(); + check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey()); - let minimum_stake_balance_request = minimum_stake_balance + 2 * SIG_FEE; - - let minimum_stake_request_with_nonce = minimum_stake_balance_request + minimum_nonce_balance; - - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - minimum_stake_request_with_nonce, - ) - .unwrap(); - check_recent_balance( - minimum_stake_request_with_nonce, - &rpc_client, - &config.signers[0].pubkey(), - ); - - request_and_confirm_airdrop( - &rpc_client, - &config, - &offline_pubkey, - minimum_stake_balance_request, - ) - .unwrap(); - check_recent_balance(minimum_stake_balance_request, &rpc_client, &offline_pubkey); + request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); + check_recent_balance(100_000, &rpc_client, &offline_pubkey); + // Create stake account, identity is authority + let minimum_stake_balance = rpc_client + .get_minimum_balance_for_rent_exemption(std::mem::size_of::()) + .unwrap(); let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap(); let stake_account_pubkey = stake_keypair.pubkey(); config.signers.push(&stake_keypair); @@ -1013,24 +975,34 @@ fn test_stake_split() { staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), lockup: Lockup::default(), - amount: SpendAmount::Some(minimum_stake_balance), + amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, dump_transaction_message: false, 
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); - check_recent_balance(minimum_stake_balance, &rpc_client, &stake_account_pubkey); + check_recent_balance( + 10 * minimum_stake_balance, + &rpc_client, + &stake_account_pubkey, + ); + // Create nonce account + let minimum_nonce_balance = rpc_client + .get_minimum_balance_for_rent_exemption(NonceState::size()) + .unwrap(); let nonce_account = keypair_from_seed(&[1u8; 32]).unwrap(); config.signers = vec![&default_signer, &nonce_account]; config.command = CliCommand::CreateNonceAccount { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -1049,7 +1021,6 @@ fn test_stake_split() { // Nonced offline split let split_account = keypair_from_seed(&[2u8; 32]).unwrap(); check_recent_balance(0, &rpc_client, &split_account.pubkey()); - config_offline.signers.push(&split_account); config_offline.command = CliCommand::SplitStake { stake_account_pubkey, @@ -1059,9 +1030,10 @@ fn test_stake_split() { blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, split_stake_account: 1, seed: None, - lamports: minimum_stake_balance, + lamports: 2 * minimum_stake_balance, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; @@ -1069,10 +1041,6 @@ fn test_stake_split() { let sign_only = parse_sign_only_reply_string(&sig_response); assert!(sign_only.has_all_signers()); let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); - - check_recent_balance(minimum_stake_balance_request, &rpc_client, &offline_pubkey); - check_recent_balance(minimum_stake_balance, &rpc_client, &stake_account_pubkey); - config.signers = vec![&offline_presigner, &split_account]; config.command = 
CliCommand::SplitStake { stake_account_pubkey, @@ -1085,21 +1053,29 @@ fn test_stake_split() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, split_stake_account: 1, seed: None, - lamports: minimum_stake_balance, + lamports: 2 * minimum_stake_balance, fee_payer: 0, }; process_command(&config).unwrap(); - check_recent_balance(0, &rpc_client, &stake_account_pubkey); - check_recent_balance(minimum_stake_balance, &rpc_client, &split_account.pubkey()); + check_recent_balance( + 8 * minimum_stake_balance, + &rpc_client, + &stake_account_pubkey, + ); + check_recent_balance( + 2 * minimum_stake_balance, + &rpc_client, + &split_account.pubkey(), + ); } #[test] fn test_stake_set_lockup() { solana_logger::setup(); - let aproximate_min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let faucet_addr = run_local_faucet(mint_keypair, None); @@ -1122,27 +1098,17 @@ fn test_stake_set_lockup() { config_offline.command = CliCommand::ClusterVersion; process_command(&config_offline).unwrap_err(); - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - 20 * aproximate_min_stake, - ) - .unwrap(); - check_recent_balance( - 20 * aproximate_min_stake, - &rpc_client, - &config.signers[0].pubkey(), - ); + request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000) + .unwrap(); + check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey()); request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); check_recent_balance(100_000, &rpc_client, &offline_pubkey); // Create stake account, identity is authority - let minimum_stake_balance = aproximate_min_stake - + rpc_client - .get_minimum_balance_for_rent_exemption(std::mem::size_of::()) - .unwrap(); + let minimum_stake_balance = rpc_client + .get_minimum_balance_for_rent_exemption(std::mem::size_of::()) + .unwrap(); let stake_keypair = 
keypair_from_seed(&[0u8; 32]).unwrap(); let stake_account_pubkey = stake_keypair.pubkey(); @@ -1157,7 +1123,7 @@ fn test_stake_set_lockup() { stake_account: 1, seed: None, staker: Some(offline_pubkey), - withdrawer: Some(offline_pubkey), + withdrawer: Some(config.signers[0].pubkey()), lockup, amount: SpendAmount::Some(10 * minimum_stake_balance), sign_only: false, @@ -1165,6 +1131,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -1191,6 +1158,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -1225,6 +1193,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -1244,6 +1213,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -1275,6 +1245,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::default(), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -1290,6 +1261,7 @@ fn test_stake_set_lockup() { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -1320,6 +1292,7 @@ fn test_stake_set_lockup() { blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, }; config_offline.output_format = OutputFormat::JsonCompact; @@ -1340,6 +1313,7 @@ fn test_stake_set_lockup() { ), nonce_account: Some(nonce_account_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, 
}; process_command(&config).unwrap(); @@ -1386,18 +1360,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { .unwrap(); check_recent_balance(200_000, &rpc_client, &config.signers[0].pubkey()); - request_and_confirm_airdrop( - &rpc_client, - &config, - &offline_pubkey, - 2 * MIN_DELEGATE_STAKE_AMOUNT + 100_000, - ) - .unwrap(); - check_recent_balance( - 2 * MIN_DELEGATE_STAKE_AMOUNT + 100_000, - &rpc_client, - &offline_pubkey, - ); + request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap(); + check_recent_balance(100_000, &rpc_client, &offline_pubkey); // Create nonce account let minimum_nonce_balance = rpc_client @@ -1410,6 +1374,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { nonce_account: 1, seed: None, nonce_authority: Some(offline_pubkey), + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -1434,12 +1399,13 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -1456,7 +1422,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { staker: Some(offline_pubkey), withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -1465,11 +1431,12 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); - check_recent_balance(MIN_DELEGATE_STAKE_AMOUNT + 400, 
&rpc_client, &stake_pubkey); + check_recent_balance(50_000, &rpc_client, &stake_pubkey); // Fetch nonce hash let nonce_hash = nonce_utils::get_account_with_commitment( @@ -1488,7 +1455,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { config_offline.command = CliCommand::WithdrawStake { stake_account_pubkey: stake_pubkey, destination_account_pubkey: recipient_pubkey, - lamports: 42, + amount: SpendAmount::Some(42), withdraw_authority: 0, custodian: None, sign_only: true, @@ -1496,6 +1463,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; let sig_response = process_command(&config_offline).unwrap(); @@ -1505,7 +1474,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { config.command = CliCommand::WithdrawStake { stake_account_pubkey: stake_pubkey, destination_account_pubkey: recipient_pubkey, - lamports: 42, + amount: SpendAmount::Some(42), withdraw_authority: 0, custodian: None, sign_only: false, @@ -1516,6 +1485,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, + seed: None, fee_payer: 0, }; process_command(&config).unwrap(); @@ -1540,12 +1511,13 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { staker: None, withdrawer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 400), + amount: SpendAmount::Some(50_000), sign_only: true, dump_transaction_message: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; @@ -1560,7 +1532,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { staker: Some(offline_pubkey), withdrawer: Some(offline_pubkey), lockup: Lockup::default(), - amount: SpendAmount::Some(MIN_DELEGATE_STAKE_AMOUNT + 
400), + amount: SpendAmount::Some(50_000), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::FeeCalculator( @@ -1569,11 +1541,12 @@ fn test_offline_nonced_create_stake_account_and_withdraw() { ), nonce_account: Some(nonce_pubkey), nonce_authority: 0, + memo: None, fee_payer: 0, from: 0, }; process_command(&config).unwrap(); let seed_address = Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap(); - check_recent_balance(MIN_DELEGATE_STAKE_AMOUNT + 400, &rpc_client, &seed_address); + check_recent_balance(50_000, &rpc_client, &seed_address); } diff --git a/cli/tests/transfer.rs b/cli/tests/transfer.rs index 1fdd341234..3d97158830 100644 --- a/cli/tests/transfer.rs +++ b/cli/tests/transfer.rs @@ -52,10 +52,12 @@ fn test_transfer() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -71,10 +73,12 @@ fn test_transfer() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -102,10 +106,12 @@ fn test_transfer() { from: 0, sign_only: true, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -122,10 +128,12 @@ fn test_transfer() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: 
BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -144,6 +152,7 @@ fn test_transfer() { nonce_account: 1, seed: None, nonce_authority: None, + memo: None, amount: SpendAmount::Some(minimum_nonce_balance), }; process_command(&config).unwrap(); @@ -167,6 +176,7 @@ fn test_transfer() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), @@ -174,6 +184,7 @@ fn test_transfer() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -196,6 +207,7 @@ fn test_transfer() { config.command = CliCommand::AuthorizeNonceAccount { nonce_account: nonce_account.pubkey(), nonce_authority: 0, + memo: None, new_authority: offline_pubkey, }; process_command(&config).unwrap(); @@ -219,10 +231,12 @@ fn test_transfer() { from: 0, sign_only: true, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::None(nonce_hash), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -238,6 +252,7 @@ fn test_transfer() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator( blockhash_query::Source::NonceAccount(nonce_account.pubkey()), @@ -245,6 +260,7 @@ fn test_transfer() { ), nonce_account: Some(nonce_account.pubkey()), nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -305,10 +321,12 @@ fn test_transfer_multisession_signing() { from: 1, 
sign_only: true, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -334,10 +352,12 @@ fn test_transfer_multisession_signing() { from: 1, sign_only: true, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::None(blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -360,10 +380,12 @@ fn test_transfer_multisession_signing() { from: 1, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -408,10 +430,12 @@ fn test_transfer_all() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -421,6 +445,54 @@ fn test_transfer_all() { check_recent_balance(49_999, &rpc_client, &recipient_pubkey); } +#[test] +fn test_transfer_unfunded_recipient() { + solana_logger::setup(); + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr)); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + let default_signer = Keypair::new(); + + let mut config = 
CliConfig::recent_for_tests(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&default_signer]; + + let sender_pubkey = config.signers[0].pubkey(); + let recipient_pubkey = Pubkey::new(&[1u8; 32]); + + request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap(); + check_recent_balance(50_000, &rpc_client, &sender_pubkey); + check_recent_balance(0, &rpc_client, &recipient_pubkey); + + check_ready(&rpc_client); + + // Plain ole transfer + config.command = CliCommand::Transfer { + amount: SpendAmount::All, + to: recipient_pubkey, + from: 0, + sign_only: false, + dump_transaction_message: false, + allow_unfunded_recipient: false, + no_wait: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + derived_address_seed: None, + derived_address_program_id: None, + }; + + // Expect failure due to unfunded recipient and the lack of the `allow_unfunded_recipient` flag + process_command(&config).unwrap_err(); +} + #[test] fn test_transfer_with_seed() { solana_logger::setup(); @@ -464,10 +536,12 @@ fn test_transfer_with_seed() { from: 0, sign_only: false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: Some(derived_address_seed), derived_address_program_id: Some(derived_address_program_id), diff --git a/cli/tests/vote.rs b/cli/tests/vote.rs index 887c5801df..64c67a1d0b 100644 --- a/cli/tests/vote.rs +++ b/cli/tests/vote.rs @@ -45,6 +45,7 @@ fn test_vote_authorize_and_withdraw() { authorized_voter: None, authorized_withdrawer: Some(config.signers[0].pubkey()), commission: 0, + memo: None, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -67,10 +68,12 @@ fn test_vote_authorize_and_withdraw() { from: 0, sign_only: 
false, dump_transaction_message: false, + allow_unfunded_recipient: true, no_wait: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), nonce_account: None, nonce_authority: 0, + memo: None, fee_payer: 0, derived_address_seed: None, derived_address_program_id: None, @@ -86,6 +89,7 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_authorized_pubkey: withdraw_authority.pubkey(), vote_authorize: VoteAuthorize::Withdrawer, + memo: None, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -103,6 +107,7 @@ fn test_vote_authorize_and_withdraw() { withdraw_authority: 1, withdraw_amount: SpendAmount::Some(100), destination_account_pubkey: destination_account, + memo: None, }; process_command(&config).unwrap(); check_recent_balance(expected_balance - 100, &rpc_client, &vote_account_pubkey); @@ -115,6 +120,7 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_identity_account: 2, withdraw_authority: 1, + memo: None, }; process_command(&config).unwrap(); } diff --git a/client/Cargo.toml b/client/Cargo.toml index a7176090b1..b19db952fe 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client" -version = "1.5.19" +version = "1.6.14" description = "Solana Client" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,25 +15,26 @@ bincode = "1.3.1" bs58 = "0.3.1" clap = "2.33.0" indicatif = "0.15.0" -jsonrpc-core = "15.0.0" +jsonrpc-core = "17.0.0" log = "0.4.11" net2 = "0.2.37" -rayon = "1.4.0" -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } +rayon = "1.5.0" +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } semver = "0.11.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-clap-utils 
= { path = "../clap-utils", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } thiserror = "1.0" +tokio = { version = "1", features = ["full"] } tungstenite = "0.10.1" url = "2.1.1" evm-state = { path = "../evm-utils/evm-state" } @@ -42,9 +43,9 @@ derivative = "2.2" [dev-dependencies] assert_matches = "1.3.0" -jsonrpc-core = "15.0.0" -jsonrpc-http-server = "15.0.0" -solana-logger = { path = "../logger", version = "=1.5.19" } +jsonrpc-core = "17.0.0" +jsonrpc-http-server = "17.0.0" +solana-logger = { path = "../logger", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/src/blockhash_query.rs b/client/src/blockhash_query.rs index 1cf40056a2..ced886b04b 100644 --- a/client/src/blockhash_query.rs +++ b/client/src/blockhash_query.rs @@ -1,12 +1,15 @@ -use crate::{nonce_utils, rpc_client::RpcClient}; -use clap::ArgMatches; -use solana_clap_utils::{ - input_parsers::{pubkey_of, value_of}, - 
nonce::*, - offline::*, -}; -use solana_sdk::{ - commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, +use { + crate::{nonce_utils, rpc_client::RpcClient}, + clap::ArgMatches, + solana_clap_utils::{ + input_parsers::{pubkey_of, value_of}, + nonce::*, + offline::*, + }, + solana_sdk::{ + commitment_config::CommitmentConfig, fee_calculator::FeeCalculator, hash::Hash, + pubkey::Pubkey, + }, }; #[derive(Debug, PartialEq)] @@ -358,7 +361,7 @@ mod tests { let nonce_pubkey = Pubkey::new(&[4u8; 32]); let rpc_nonce_account = UiAccount::encode( &nonce_pubkey, - nonce_account, + &nonce_account, UiAccountEncoding::Base64, None, None, diff --git a/client/src/client_error.rs b/client/src/client_error.rs index 9c2a0f2988..fec5d047c3 100644 --- a/client/src/client_error.rs +++ b/client/src/client_error.rs @@ -1,10 +1,12 @@ -use crate::rpc_request; -use solana_faucet::faucet::FaucetError; -use solana_sdk::{ - signature::SignerError, transaction::TransactionError, transport::TransportError, +use { + crate::rpc_request, + solana_faucet::faucet::FaucetError, + solana_sdk::{ + signature::SignerError, transaction::TransactionError, transport::TransportError, + }, + std::io, + thiserror::Error, }; -use std::io; -use thiserror::Error; pub use reqwest; // export `reqwest` for clients diff --git a/client/src/http_sender.rs b/client/src/http_sender.rs index 982c085a3f..88f45ecde4 100644 --- a/client/src/http_sender.rs +++ b/client/src/http_sender.rs @@ -1,17 +1,31 @@ -use crate::{ - client_error::Result, - rpc_custom_error, - rpc_request::{RpcError, RpcRequest, RpcResponseErrorData}, - rpc_response::RpcSimulateTransactionResult, - rpc_sender::RpcSender, +use { + crate::{ + client_error::Result, + rpc_custom_error, + rpc_request::{RpcError, RpcRequest, RpcResponseErrorData}, + rpc_response::RpcSimulateTransactionResult, + rpc_sender::RpcSender, + }, + log::*, + reqwest::{ + self, + header::{CONTENT_TYPE, RETRY_AFTER}, + StatusCode, + }, + std::{ + 
sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + thread::sleep, + time::Duration, + }, }; -use log::*; -use reqwest::{self, header::CONTENT_TYPE, StatusCode}; -use std::{thread::sleep, time::Duration}; pub struct HttpSender { - client: reqwest::blocking::Client, + client: Arc, url: String, + request_id: AtomicU64, } impl HttpSender { @@ -20,12 +34,22 @@ impl HttpSender { } pub fn new_with_timeout(url: String, timeout: Duration) -> Self { - let client = reqwest::blocking::Client::builder() - .timeout(timeout) - .build() - .expect("build rpc client"); + // `reqwest::blocking::Client` panics if run in a tokio async context. Shuttle the + // request to a different tokio thread to avoid this + let client = Arc::new( + tokio::task::block_in_place(move || { + reqwest::blocking::Client::builder() + .timeout(timeout) + .build() + }) + .expect("build rpc client"), + ); - Self { client, url } + Self { + client, + url, + request_id: AtomicU64::new(0), + } } } @@ -38,39 +62,57 @@ struct RpcErrorObject { impl RpcSender for HttpSender { fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result { - // Concurrent requests are not supported so reuse the same request id for all requests - let request_id = 1; - - let request_json = request.build_request_json(request_id, params); + let request_id = self.request_id.fetch_add(1, Ordering::Relaxed); + let request_json = request.build_request_json(request_id, params).to_string(); let mut too_many_requests_retries = 5; loop { - match self - .client - .post(&self.url) - .header(CONTENT_TYPE, "application/json") - .body(request_json.to_string()) - .send() - { + // `reqwest::blocking::Client` panics if run in a tokio async context. 
Shuttle the + // request to a different tokio thread to avoid this + let response = { + let client = self.client.clone(); + let request_json = request_json.clone(); + tokio::task::block_in_place(move || { + client + .post(&self.url) + .header(CONTENT_TYPE, "application/json") + .body(request_json) + .send() + }) + }; + + match response { Ok(response) => { if !response.status().is_success() { if response.status() == StatusCode::TOO_MANY_REQUESTS && too_many_requests_retries > 0 { + let mut duration = Duration::from_millis(500); + if let Some(retry_after) = response.headers().get(RETRY_AFTER) { + if let Ok(retry_after) = retry_after.to_str() { + if let Ok(retry_after) = retry_after.parse::() { + if retry_after < 120 { + duration = Duration::from_secs(retry_after); + } + } + } + } + too_many_requests_retries -= 1; debug!( - "Server responded with {:?}, {} retries left", - response, too_many_requests_retries + "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}", + response, too_many_requests_retries, duration ); - // Sleep for 500ms to give the server a break - sleep(Duration::from_millis(500)); + sleep(duration); continue; } return Err(response.error_for_status().unwrap_err().into()); } - let json: serde_json::Value = serde_json::from_str(&response.text()?)?; + let response_text = tokio::task::block_in_place(move || response.text())?; + + let json: serde_json::Value = serde_json::from_str(&response_text)?; if json["error"].is_object() { return match serde_json::from_value::(json["error"].clone()) { @@ -121,3 +163,22 @@ impl RpcSender for HttpSender { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test(flavor = "multi_thread")] + async fn http_sender_on_tokio_multi_thread() { + let http_sender = HttpSender::new("http://localhost:1234".to_string()); + let _ = http_sender.send(RpcRequest::GetVersion, serde_json::Value::Null); + } + + #[tokio::test(flavor = "current_thread")] + #[should_panic(expected = "can call blocking 
only when running on the multi-threaded runtime")] + async fn http_sender_ontokio_current_thread_should_panic() { + // RpcClient::new() will panic in the tokio current-thread runtime due to `tokio::task::block_in_place()` usage, and there + // doesn't seem to be a way to detect whether the tokio runtime is multi_thread or current_thread... + let _ = HttpSender::new("http://localhost:1234".to_string()); + } +} diff --git a/client/src/lib.rs b/client/src/lib.rs index 119c46c134..a4f5f69d9c 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -18,3 +18,4 @@ pub mod rpc_request; pub mod rpc_response; pub mod rpc_sender; pub mod thin_client; +pub mod tpu_client; diff --git a/client/src/mock_sender.rs b/client/src/mock_sender.rs index d523bd4a3b..81995b0bc4 100644 --- a/client/src/mock_sender.rs +++ b/client/src/mock_sender.rs @@ -1,21 +1,23 @@ -use crate::{ - client_error::Result, - rpc_request::RpcRequest, - rpc_response::{Response, RpcResponseContext, RpcVersionInfo}, - rpc_sender::RpcSender, +use { + crate::{ + client_error::Result, + rpc_request::RpcRequest, + rpc_response::{Response, RpcResponseContext, RpcVersionInfo}, + rpc_sender::RpcSender, + }, + serde_json::{json, Number, Value}, + solana_sdk::{ + epoch_info::EpochInfo, + fee_calculator::{FeeCalculator, FeeRateGovernor}, + instruction::InstructionError, + signature::Signature, + transaction::{self, Transaction, TransactionError}, + }, + solana_stake_program::stake_state::MIN_DELEGATE_STAKE_AMOUNT, + solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus}, + solana_version::Version, + std::{collections::HashMap, sync::RwLock}, }; -use serde_json::{json, Number, Value}; -use solana_sdk::{ - epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, - instruction::InstructionError, - signature::Signature, - transaction::{self, Transaction, TransactionError}, -}; -use solana_stake_program::stake_state::MIN_DELEGATE_STAKE_AMOUNT; -use 
solana_transaction_status::{TransactionConfirmationStatus, TransactionStatus}; -use solana_version::Version; -use std::{collections::HashMap, sync::RwLock}; pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8"; pub const SIGNATURE: &str = @@ -123,6 +125,7 @@ impl RpcSender for MockSender { } RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)), RpcRequest::GetSlot => Value::Number(Number::from(0)), + RpcRequest::GetMaxShredInsertSlot => Value::Number(Number::from(0)), RpcRequest::RequestAirdrop => Value::String(Signature::new(&[8; 64]).to_string()), RpcRequest::SendTransaction => { let signature = if self.url == "malicious" { diff --git a/client/src/nonce_utils.rs b/client/src/nonce_utils.rs index 310087867a..de2cf41286 100644 --- a/client/src/nonce_utils.rs +++ b/client/src/nonce_utils.rs @@ -1,14 +1,16 @@ -use crate::rpc_client::RpcClient; -use solana_sdk::{ - account::Account, - account_utils::StateMut, - commitment_config::CommitmentConfig, - nonce::{ - state::{Data, Versions}, - State, +use { + crate::rpc_client::RpcClient, + solana_sdk::{ + account::{Account, ReadableAccount}, + account_utils::StateMut, + commitment_config::CommitmentConfig, + nonce::{ + state::{Data, Versions}, + State, + }, + pubkey::Pubkey, + system_program, }, - pubkey::Pubkey, - system_program, }; #[derive(Debug, thiserror::Error, PartialEq)] @@ -49,24 +51,28 @@ pub fn get_account_with_commitment( .and_then(|a| account_identity_ok(&a).map(|()| a)) } -pub fn account_identity_ok(account: &Account) -> Result<(), Error> { - if account.owner != system_program::id() { +pub fn account_identity_ok(account: &T) -> Result<(), Error> { + if account.owner() != &system_program::id() { Err(Error::InvalidAccountOwner) - } else if account.data.is_empty() { + } else if account.data().is_empty() { Err(Error::UnexpectedDataSize) } else { Ok(()) } } -pub fn state_from_account(account: &Account) -> Result { +pub fn state_from_account>( + account: &T, +) -> Result { 
account_identity_ok(account)?; StateMut::::state(account) .map_err(|_| Error::InvalidAccountData) .map(|v| v.convert_to_current()) } -pub fn data_from_account(account: &Account) -> Result { +pub fn data_from_account>( + account: &T, +) -> Result { account_identity_ok(account)?; state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone())) } diff --git a/client/src/perf_utils.rs b/client/src/perf_utils.rs index 2ec7dd4b21..a22451b4b7 100644 --- a/client/src/perf_utils.rs +++ b/client/src/perf_utils.rs @@ -1,12 +1,14 @@ -use log::*; -use solana_sdk::{client::Client, commitment_config::CommitmentConfig, timing::duration_as_s}; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, +use { + log::*, + solana_sdk::{client::Client, commitment_config::CommitmentConfig, timing::duration_as_s}, + std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + thread::sleep, + time::{Duration, Instant}, }, - thread::sleep, - time::{Duration, Instant}, }; #[derive(Default)] diff --git a/client/src/pubsub_client.rs b/client/src/pubsub_client.rs index fada6e6591..d3dd63bcc3 100644 --- a/client/src/pubsub_client.rs +++ b/client/src/pubsub_client.rs @@ -1,27 +1,33 @@ -use crate::{ - rpc_config::{RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter}, - rpc_response::{Response as RpcResponse, RpcLogsResponse, RpcSignatureResult, SlotInfo}, -}; -use log::*; -use serde::de::DeserializeOwned; -use serde_json::{ - json, - value::Value::{Number, Object}, - Map, Value, -}; -use solana_sdk::signature::Signature; -use std::{ - marker::PhantomData, - sync::{ - atomic::{AtomicBool, Ordering}, - mpsc::{channel, Receiver}, - Arc, RwLock, +use { + crate::{ + rpc_config::{ + RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter, + }, + rpc_response::{ + Response as RpcResponse, RpcLogsResponse, RpcSignatureResult, SlotInfo, SlotUpdate, + }, + }, + log::*, + serde::de::DeserializeOwned, + 
serde_json::{ + json, + value::Value::{Number, Object}, + Map, Value, }, - thread::JoinHandle, + solana_sdk::signature::Signature, + std::{ + marker::PhantomData, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc::{channel, Receiver}, + Arc, RwLock, + }, + thread::JoinHandle, + }, + thiserror::Error, + tungstenite::{client::AutoStream, connect, Message, WebSocket}, + url::{ParseError, Url}, }; -use thiserror::Error; -use tungstenite::{client::AutoStream, connect, Message, WebSocket}; -use url::{ParseError, Url}; #[derive(Debug, Error)] pub enum PubsubClientError { @@ -336,6 +342,54 @@ impl PubsubClient { Ok((result, receiver)) } + + pub fn slot_updates_subscribe( + url: &str, + handler: impl Fn(SlotUpdate) + Send + 'static, + ) -> Result, PubsubClientError> { + let url = Url::parse(url)?; + let (socket, _response) = connect(url)?; + + let socket = Arc::new(RwLock::new(socket)); + let exit = Arc::new(AtomicBool::new(false)); + let exit_clone = exit.clone(); + let subscription_id = PubsubClientSubscription::::send_subscribe( + &socket, + json!({ + "jsonrpc":"2.0","id":1,"method":"slotsUpdatesSubscribe","params":[] + }) + .to_string(), + )?; + + let t_cleanup = { + let socket = socket.clone(); + std::thread::spawn(move || { + loop { + if exit_clone.load(Ordering::Relaxed) { + break; + } + match PubsubClientSubscription::read_message(&socket) { + Ok(message) => handler(message), + Err(err) => { + info!("receive error: {:?}", err); + break; + } + } + } + + info!("websocket - exited receive loop"); + }) + }; + + Ok(PubsubClientSubscription { + message_type: PhantomData, + operation: "slotsUpdates", + socket, + subscription_id, + t_cleanup: Some(t_cleanup), + exit, + }) + } } #[cfg(test)] diff --git a/client/src/rpc_cache.rs b/client/src/rpc_cache.rs index 2e2a1a12d6..38dbba5825 100644 --- a/client/src/rpc_cache.rs +++ b/client/src/rpc_cache.rs @@ -1,7 +1,9 @@ -use crate::{rpc_config::RpcLargestAccountsFilter, rpc_response::RpcAccountBalance}; -use std::{ - 
collections::HashMap, - time::{Duration, SystemTime}, +use { + crate::{rpc_config::RpcLargestAccountsFilter, rpc_response::RpcAccountBalance}, + std::{ + collections::HashMap, + time::{Duration, SystemTime}, + }, }; #[derive(Debug, Clone)] diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index 95a196596a..b14a7e831f 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -1,53 +1,47 @@ -use crate::{ - client_error::{ClientError, ClientErrorKind, Result as ClientResult}, - http_sender::HttpSender, - mock_sender::{MockSender, Mocks}, - rpc_config::RpcAccountInfoConfig, - rpc_config::{ - RpcConfirmedBlockConfig, RpcConfirmedTransactionConfig, RpcEpochConfig, - RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig, - RpcProgramAccountsConfig, RpcRequestAirdropConfig, RpcSendTransactionConfig, - RpcSimulateTransactionConfig, RpcTokenAccountsFilter, +use { + crate::{ + client_error::{ClientError, ClientErrorKind, Result as ClientResult}, + http_sender::HttpSender, + mock_sender::{MockSender, Mocks}, + rpc_config::RpcAccountInfoConfig, + rpc_config::*, + rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, + rpc_response::*, + rpc_sender::RpcSender, }, - rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, - rpc_response::*, - rpc_sender::RpcSender, -}; -use bincode::serialize; -use indicatif::{ProgressBar, ProgressStyle}; -use log::*; -use serde_json::{json, Value}; -use solana_account_decoder::{ - parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount}, - UiAccount, UiAccountData, UiAccountEncoding, -}; -use solana_sdk::{ - account::Account, - clock::{ - Epoch, Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, - MAX_HASH_AGE_IN_SECONDS, + bincode::serialize, + indicatif::{ProgressBar, ProgressStyle}, + log::*, + serde_json::{json, Value}, + solana_account_decoder::{ + parse_token::{TokenAccountType, UiTokenAccount, UiTokenAmount}, + UiAccount, 
UiAccountData, UiAccountEncoding, + }, + solana_sdk::{ + account::Account, + clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT, MAX_HASH_AGE_IN_SECONDS}, + commitment_config::{CommitmentConfig, CommitmentLevel}, + epoch_info::EpochInfo, + epoch_schedule::EpochSchedule, + fee_calculator::{FeeCalculator, FeeRateGovernor}, + hash::Hash, + pubkey::Pubkey, + signature::Signature, + transaction::{self, uses_durable_nonce, Transaction}, + }, + solana_transaction_status::{ + EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiConfirmedBlock, + UiTransactionEncoding, + }, + solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY, + std::{ + cmp::min, + net::SocketAddr, + str::FromStr, + sync::RwLock, + thread::sleep, + time::{Duration, Instant}, }, - commitment_config::{CommitmentConfig, CommitmentLevel}, - epoch_info::EpochInfo, - epoch_schedule::EpochSchedule, - fee_calculator::{FeeCalculator, FeeRateGovernor}, - hash::Hash, - pubkey::Pubkey, - signature::Signature, - transaction::{self, uses_durable_nonce, Transaction}, -}; -use solana_transaction_status::{ - EncodedConfirmedBlock, EncodedConfirmedTransaction, TransactionStatus, UiConfirmedBlock, - UiTransactionEncoding, -}; -use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY; -use std::{ - cmp::min, - net::SocketAddr, - str::FromStr, - sync::RwLock, - thread::sleep, - time::{Duration, Instant}, }; pub struct RpcClient { @@ -129,12 +123,18 @@ impl RpcClient { Self::new(get_rpc_request_str(addr, false)) } + pub fn new_socket_with_commitment( + addr: SocketAddr, + commitment_config: CommitmentConfig, + ) -> Self { + Self::new_with_commitment(get_rpc_request_str(addr, false), commitment_config) + } + pub fn new_socket_with_timeout(addr: SocketAddr, timeout: Duration) -> Self { let url = get_rpc_request_str(addr, false); Self::new_with_timeout(url, timeout) } - #[allow(dead_code)] fn get_node_version(&self) -> Result { let r_node_version = self.node_version.read().unwrap(); if let Some(version) 
= &*r_node_version { @@ -157,9 +157,8 @@ impl RpcClient { self.commitment_config } - #[allow(clippy::unnecessary_wraps)] // to keep interface compatible with solana fn use_deprecated_commitment(&self) -> Result { - Ok(false) + Ok(self.get_node_version()? < semver::Version::new(1, 5, 5)) } fn maybe_map_commitment( @@ -216,7 +215,11 @@ impl RpcClient { #[allow(clippy::unnecessary_wraps)] // to keep interface compatible with solana fn default_cluster_transaction_encoding(&self) -> Result { - Ok(UiTransactionEncoding::Base64) + if self.get_node_version()? < semver::Version::new(1, 3, 16) { + Ok(UiTransactionEncoding::Base58) + } else { + Ok(UiTransactionEncoding::Base64) + } } pub fn send_transaction_with_config( @@ -403,6 +406,20 @@ impl RpcClient { ) } + pub fn get_block_height(&self) -> ClientResult { + self.get_block_height_with_commitment(self.commitment_config) + } + + pub fn get_block_height_with_commitment( + &self, + commitment_config: CommitmentConfig, + ) -> ClientResult { + self.send( + RpcRequest::GetBlockHeight, + json!([self.maybe_map_commitment(commitment_config)?]), + ) + } + pub fn get_slot_leaders(&self, start_slot: Slot, limit: u64) -> ClientResult> { self.send(RpcRequest::GetSlotLeaders, json!([start_slot, limit])) .and_then(|slot_leaders: Vec| { @@ -421,6 +438,18 @@ impl RpcClient { }) } + /// Get block production for the current epoch + pub fn get_block_production(&self) -> RpcResult { + self.send(RpcRequest::GetBlockProduction, Value::Null) + } + + pub fn get_block_production_with_config( + &self, + config: RpcBlockProductionConfig, + ) -> RpcResult { + self.send(RpcRequest::GetBlockProduction, json!(config)) + } + pub fn get_stake_activation( &self, stake_account: Pubkey, @@ -494,10 +523,17 @@ impl RpcClient { &self, commitment_config: CommitmentConfig, ) -> ClientResult { - self.send( - RpcRequest::GetVoteAccounts, - json!([self.maybe_map_commitment(commitment_config)?]), - ) + self.get_vote_accounts_with_config(RpcGetVoteAccountsConfig { + 
commitment: Some(self.maybe_map_commitment(commitment_config)?), + ..RpcGetVoteAccountsConfig::default() + }) + } + + pub fn get_vote_accounts_with_config( + &self, + config: RpcGetVoteAccountsConfig, + ) -> ClientResult { + self.send(RpcRequest::GetVoteAccounts, json!([config])) } pub fn wait_for_max_stake( @@ -734,12 +770,23 @@ impl RpcClient { slot: Option, commitment_config: CommitmentConfig, ) -> ClientResult> { - self.send( - RpcRequest::GetLeaderSchedule, - json!([slot, self.maybe_map_commitment(commitment_config)?]), + self.get_leader_schedule_with_config( + slot, + RpcLeaderScheduleConfig { + commitment: Some(self.maybe_map_commitment(commitment_config)?), + ..RpcLeaderScheduleConfig::default() + }, ) } + pub fn get_leader_schedule_with_config( + &self, + slot: Option, + config: RpcLeaderScheduleConfig, + ) -> ClientResult> { + self.send(RpcRequest::GetLeaderSchedule, json!([slot, config])) + } + pub fn get_epoch_schedule(&self) -> ClientResult { self.send(RpcRequest::GetEpochSchedule, Value::Null) } @@ -803,38 +850,52 @@ impl RpcClient { &self, transaction: &Transaction, ) -> ClientResult { - const SEND_RETRIES: usize = 20; - const STATUS_RETRIES: usize = 40; + const SEND_RETRIES: usize = usize::MAX; + const GET_STATUS_RETRIES: usize = usize::MAX; - for _ in 0..SEND_RETRIES { + 'sending: for _ in 0..SEND_RETRIES { let signature = self.send_transaction(transaction)?; - // TODO(velas): sync with solana codebase or rewrite this function into new one - let _recent_blockhash = if uses_durable_nonce(transaction).is_some() { - self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())? - .value - .0 + + let recent_blockhash = if uses_durable_nonce(transaction).is_some() { + let (recent_blockhash, ..) = self + .get_recent_blockhash_with_commitment(CommitmentConfig::processed())? 
+ .value; + recent_blockhash } else { transaction.message.recent_blockhash }; - for status_retry in 0..STATUS_RETRIES { - if let Some(v) = self.get_signature_status(&signature)? { - return Ok(v.map(|_| signature)?); - } - - if cfg!(not(test)) && status_retry < STATUS_RETRIES - // Ignore sleep at last step. - { - // Retry twice a second - sleep(Duration::from_millis(500)); + for status_retry in 0..GET_STATUS_RETRIES { + match self.get_signature_status(&signature)? { + Some(Ok(_)) => return Ok(signature), + Some(Err(e)) => return Err(e.into()), + None => { + let fee_calculator = self + .get_fee_calculator_for_blockhash_with_commitment( + &recent_blockhash, + CommitmentConfig::processed(), + )? + .value; + if fee_calculator.is_none() { + // Block hash is not found by some reason + break 'sending; + } else if cfg!(not(test)) + // Ignore sleep at last step. + && status_retry < GET_STATUS_RETRIES + { + // Retry twice a second + sleep(Duration::from_millis(500)); + continue; + } + } } } } Err(RpcError::ForUser( "unable to confirm transaction. \ - This can happen in situations such as transaction expiration \ - and insufficient fee-payer funds" + This can happen in situations such as transaction expiration \ + and insufficient fee-payer funds" .to_string(), ) .into()) @@ -889,6 +950,14 @@ impl RpcClient { })? } + pub fn get_max_retransmit_slot(&self) -> ClientResult { + self.send(RpcRequest::GetMaxRetransmitSlot, Value::Null) + } + + pub fn get_max_shred_insert_slot(&self) -> ClientResult { + self.send(RpcRequest::GetMaxShredInsertSlot, Value::Null) + } + pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult>> { Ok(self .get_multiple_accounts_with_commitment(pubkeys, self.commitment_config)? 
@@ -967,12 +1036,11 @@ impl RpcClient { self.get_program_accounts_with_config( pubkey, RpcProgramAccountsConfig { - filters: None, account_config: RpcAccountInfoConfig { - encoding: Some(UiAccountEncoding::Base64), - commitment: Some(self.commitment_config), + encoding: Some(UiAccountEncoding::Base64Zstd), ..RpcAccountInfoConfig::default() }, + ..RpcProgramAccountsConfig::default() }, ) } @@ -982,7 +1050,10 @@ impl RpcClient { pubkey: &Pubkey, config: RpcProgramAccountsConfig, ) -> ClientResult> { - let commitment = config.account_config.commitment.unwrap_or_default(); + let commitment = config + .account_config + .commitment + .unwrap_or_else(|| self.commitment()); let commitment = self.maybe_map_commitment(commitment)?; let account_config = RpcAccountInfoConfig { commitment: Some(commitment), @@ -1032,6 +1103,7 @@ impl RpcClient { blockhash, fee_calculator, last_valid_slot, + .. }, }) = self .send::>( @@ -1039,6 +1111,19 @@ impl RpcClient { json!([self.maybe_map_commitment(commitment_config)?]), ) { (context, blockhash, fee_calculator, last_valid_slot) + } else if let Ok(Response { + context, + value: + DeprecatedRpcFees { + blockhash, + fee_calculator, + last_valid_slot, + }, + }) = self.send::>( + RpcRequest::GetFees, + json!([self.maybe_map_commitment(commitment_config)?]), + ) { + (context, blockhash, fee_calculator, last_valid_slot) } else if let Ok(Response { context, value: @@ -1123,9 +1208,7 @@ impl RpcClient { debug!("Got same blockhash ({:?}), will retry...", blockhash); // Retry ~twice during a slot - sleep(Duration::from_millis( - 500 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND, - )); + sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)); num_retries += 1; } Err(RpcError::ForUser(format!( @@ -1695,10 +1778,6 @@ impl RpcClient { } } - pub fn validator_exit(&self) -> ClientResult { - self.send(RpcRequest::ValidatorExit, Value::Null) - } - // EVM scope. 
pub fn get_evm_transaction_count( &self, @@ -1787,7 +1866,7 @@ mod tests { use super::*; use crate::{client_error::ClientErrorKind, mock_sender::PUBKEY}; use assert_matches::assert_matches; - use jsonrpc_core::{Error, IoHandler, Params}; + use jsonrpc_core::{futures::prelude::*, Error, IoHandler, Params}; use jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder}; use serde_json::Number; use solana_sdk::{ @@ -1798,20 +1877,35 @@ mod tests { #[test] fn test_send() { + _test_send(); + } + + #[tokio::test(flavor = "current_thread")] + #[should_panic(expected = "can call blocking only when running on the multi-threaded runtime")] + async fn test_send_async_current_thread_should_panic() { + _test_send(); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_send_async_multi_thread() { + _test_send(); + } + + fn _test_send() { let (sender, receiver) = channel(); thread::spawn(move || { let rpc_addr = "0.0.0.0:0".parse().unwrap(); let mut io = IoHandler::default(); // Successful request io.add_method("getBalance", |_params: Params| { - Ok(Value::Number(Number::from(50))) + future::ok(Value::Number(Number::from(50))) }); // Failed request io.add_method("getRecentBlockhash", |params: Params| { if params != Params::None { - Err(Error::invalid_request()) + future::err(Error::invalid_request()) } else { - Ok(Value::String( + future::ok(Value::String( "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx".to_string(), )) } diff --git a/client/src/rpc_config.rs b/client/src/rpc_config.rs index 3a6b058eef..09caad53fc 100644 --- a/client/src/rpc_config.rs +++ b/client/src/rpc_config.rs @@ -1,10 +1,12 @@ -use crate::rpc_filter::RpcFilterType; -use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}; -use solana_sdk::{ - clock::{Epoch, Slot}, - commitment_config::{CommitmentConfig, CommitmentLevel}, +use { + crate::rpc_filter::RpcFilterType, + solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}, + solana_sdk::{ + clock::{Epoch, 
Slot}, + commitment_config::{CommitmentConfig, CommitmentLevel}, + }, + solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, }; -use solana_transaction_status::{TransactionDetails, UiTransactionEncoding}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -21,14 +23,24 @@ pub struct RpcSendTransactionConfig { pub encoding: Option, } +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateTransactionAccountsConfig { + pub encoding: Option, + pub addresses: Vec, +} + #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcSimulateTransactionConfig { #[serde(default)] pub sig_verify: bool, + #[serde(default)] + pub replace_recent_blockhash: bool, #[serde(flatten)] pub commitment: Option, pub encoding: Option, + pub accounts: Option, } #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] @@ -39,6 +51,54 @@ pub struct RpcRequestAirdropConfig { pub commitment: Option, } +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcLeaderScheduleConfig { + pub identity: Option, // validator identity, as a base-58 encoded string + #[serde(flatten)] + pub commitment: Option, +} + +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockProductionConfigRange { + pub first_slot: Slot, + pub last_slot: Option, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockProductionConfig { + pub identity: Option, // validator identity, as a base-58 encoded string + pub range: Option, // current epoch if `None` + #[serde(flatten)] + pub commitment: Option, +} + +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
RpcGetVoteAccountsConfig { + pub vote_pubkey: Option, // validator vote address, as a base-58 encoded string + #[serde(flatten)] + pub commitment: Option, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum RpcLeaderScheduleConfigWrapper { + SlotOnly(Option), + ConfigOnly(Option), +} + +impl RpcLeaderScheduleConfigWrapper { + pub fn unzip(&self) -> (Option, Option) { + match &self { + RpcLeaderScheduleConfigWrapper::SlotOnly(slot) => (*slot, None), + RpcLeaderScheduleConfigWrapper::ConfigOnly(config) => (None, config.clone()), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum RpcLargestAccountsFilter { @@ -77,6 +137,7 @@ pub struct RpcProgramAccountsConfig { pub filters: Option>, #[serde(flatten)] pub account_config: RpcAccountInfoConfig, + pub with_context: Option, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] diff --git a/client/src/rpc_custom_error.rs b/client/src/rpc_custom_error.rs index e6191bf680..400947dcd6 100644 --- a/client/src/rpc_custom_error.rs +++ b/client/src/rpc_custom_error.rs @@ -1,8 +1,10 @@ //! 
Implementation defined RPC server errors -use crate::rpc_response::RpcSimulateTransactionResult; -use jsonrpc_core::{Error, ErrorCode}; -use solana_sdk::clock::Slot; +use { + crate::rpc_response::RpcSimulateTransactionResult, + jsonrpc_core::{Error, ErrorCode}, + solana_sdk::clock::Slot, +}; pub const JSON_RPC_SERVER_ERROR_BLOCK_CLEANED_UP: i64 = -32001; pub const JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE: i64 = -32002; @@ -13,6 +15,8 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 pub const JSON_RPC_SERVER_ERROR_SLOT_SKIPPED: i64 = -32007; pub const JSON_RPC_SERVER_ERROR_NO_SNAPSHOT: i64 = -32008; pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009; +pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010; +pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011; pub enum RpcCustomError { BlockCleanedUp { @@ -38,6 +42,10 @@ pub enum RpcCustomError { LongTermStorageSlotSkipped { slot: Slot, }, + KeyExcludedFromSecondaryIndex { + index_key: String, + }, + TransactionHistoryNotAvailable, } #[derive(Debug, Serialize, Deserialize)] @@ -115,6 +123,24 @@ impl From for Error { message: format!("Slot {} was skipped, or missing in long-term storage", slot), data: None, }, + RpcCustomError::KeyExcludedFromSecondaryIndex { index_key } => Self { + code: ErrorCode::ServerError( + JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX, + ), + message: format!( + "{} excluded from account secondary indexes; \ + this RPC method unavailable for key", + index_key + ), + data: None, + }, + RpcCustomError::TransactionHistoryNotAvailable => Self { + code: ErrorCode::ServerError( + JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE, + ), + message: "Transaction history is not available from this node".to_string(), + data: None, + }, } } } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index a9a818e380..cd28832ee3 100644 --- 
a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -1,16 +1,19 @@ -use crate::rpc_response::RpcSimulateTransactionResult; -use derivative::Derivative; -use serde_json::{json, Value}; -use solana_sdk::{clock::Slot, pubkey::Pubkey}; -use std::fmt; -use thiserror::Error; +use { + crate::rpc_response::RpcSimulateTransactionResult, + derivative::Derivative, + serde_json::{json, Value}, + solana_sdk::{clock::Slot, pubkey::Pubkey}, + std::fmt, + thiserror::Error, +}; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub enum RpcRequest { DeregisterNode, - ValidatorExit, GetAccountInfo, GetBalance, + GetBlockHeight, + GetBlockProduction, GetBlockTime, GetBlockCommitment, GetClusterNodes, @@ -53,20 +56,18 @@ pub enum RpcRequest { GetMaxShredInsertSlot, GetSlotLeader, GetSlotLeaders, - GetSignatureConfirmation, GetStorageTurn, GetStorageTurnRate, GetSlotsPerSegment, GetStakeActivation, GetStoragePubkeysForSlot, GetSupply, + GetTokenLargestAccounts, GetTokenAccountBalance, GetTokenAccountsByDelegate, GetTokenAccountsByOwner, GetTokenSupply, - GetTokenLargestAccounts, - #[deprecated(since = "1.5.19", note = "Please use RpcRequest::GetSupply instead")] GetTotalSupply, @@ -107,11 +108,12 @@ impl fmt::Display for RpcRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let method = match self { RpcRequest::DeregisterNode => "deregisterNode", - RpcRequest::ValidatorExit => "validatorExit", RpcRequest::GetAccountInfo => "getAccountInfo", RpcRequest::GetBalance => "getBalance", - RpcRequest::GetBlockCommitment => "getBlockCommitment", + RpcRequest::GetBlockHeight => "getBlockHeight", + RpcRequest::GetBlockProduction => "getBlockProduction", RpcRequest::GetBlockTime => "getBlockTime", + RpcRequest::GetBlockCommitment => "getBlockCommitment", RpcRequest::GetClusterNodes => "getClusterNodes", RpcRequest::GetConfirmedBlock => "getConfirmedBlock", RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks", @@ -133,6 +135,8 @@ impl fmt::Display for RpcRequest { 
RpcRequest::GetInflationReward => "getInflationReward", RpcRequest::GetLargestAccounts => "getLargestAccounts", RpcRequest::GetLeaderSchedule => "getLeaderSchedule", + RpcRequest::GetMaxRetransmitSlot => "getMaxRetransmitSlot", + RpcRequest::GetMaxShredInsertSlot => "getMaxShredInsertSlot", RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption", RpcRequest::GetMultipleAccounts => "getMultipleAccounts", RpcRequest::GetProgramAccounts => "getProgramAccounts", @@ -142,23 +146,20 @@ impl fmt::Display for RpcRequest { RpcRequest::GetSignatureStatuses => "getSignatureStatuses", RpcRequest::GetSignatureStatus => "getSignatureStatus", RpcRequest::GetSlot => "getSlot", - RpcRequest::GetMaxRetransmitSlot => "getMaxRetransmitSlot", - RpcRequest::GetMaxShredInsertSlot => "getMaxShredInsertSlot", RpcRequest::GetSlotLeader => "getSlotLeader", RpcRequest::GetSlotLeaders => "getSlotLeaders", - RpcRequest::GetSignatureConfirmation => "getSignatureConfirmation", RpcRequest::GetStakeActivation => "getStakeActivation", RpcRequest::GetStorageTurn => "getStorageTurn", RpcRequest::GetStorageTurnRate => "getStorageTurnRate", RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment", RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot", RpcRequest::GetSupply => "getSupply", + RpcRequest::GetTokenLargestAccounts => "getTokenLargestAccounts", RpcRequest::GetTokenAccountBalance => "getTokenAccountBalance", RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate", RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner", RpcRequest::GetTokenSupply => "getTokenSupply", RpcRequest::GetTotalSupply => "getTotalSupply", - RpcRequest::GetTokenLargestAccounts => "getTokenLargestAccounts", RpcRequest::GetTransactionCount => "getTransactionCount", RpcRequest::GetVersion => "getVersion", RpcRequest::GetVoteAccounts => "getVoteAccounts", diff --git a/client/src/rpc_response.rs b/client/src/rpc_response.rs index c78c632092..a712739e60 100644 
--- a/client/src/rpc_response.rs +++ b/client/src/rpc_response.rs @@ -1,15 +1,17 @@ -use crate::client_error; -use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount}; -use solana_sdk::{ - clock::{Epoch, Slot, UnixTimestamp}, - fee_calculator::{FeeCalculator, FeeRateGovernor}, - inflation::Inflation, - transaction::{Result, TransactionError}, -}; -use solana_transaction_status::{ - ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, +use { + crate::client_error, + solana_account_decoder::{parse_token::UiTokenAmount, UiAccount}, + solana_sdk::{ + clock::{Epoch, Slot, UnixTimestamp}, + fee_calculator::{FeeCalculator, FeeRateGovernor}, + inflation::Inflation, + transaction::{Result, TransactionError}, + }, + solana_transaction_status::{ + ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, + }, + std::{collections::HashMap, fmt, net::SocketAddr}, }; -use std::{collections::HashMap, fmt, net::SocketAddr}; pub type RpcResult = client_error::Result>; @@ -44,6 +46,15 @@ pub struct RpcFees { pub blockhash: String, pub fee_calculator: FeeCalculator, pub last_valid_slot: Slot, + pub last_valid_block_height: u64, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct DeprecatedRpcFees { + pub blockhash: String, + pub fee_calculator: FeeCalculator, + pub last_valid_slot: Slot, } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -204,11 +215,28 @@ pub struct RpcContactInfo { pub version: Option, /// First 4 bytes of the FeatureSet identifier pub feature_set: Option, + /// Shred version + pub shred_version: Option, } /// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot pub type RpcLeaderSchedule = HashMap>; +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockProductionRange { + pub first_slot: Slot, + pub last_slot: Slot, +} + +#[derive(Serialize, Deserialize, Clone)] 
+#[serde(rename_all = "camelCase")] +pub struct RpcBlockProduction { + /// Map of leader base58 identity pubkeys to a tuple of `(number of leader slots, number of blocks produced)` + pub by_identity: HashMap, + pub range: RpcBlockProductionRange, +} + #[derive(Serialize, Deserialize, Clone)] #[serde(rename_all = "kebab-case")] pub struct RpcVersionInfo { @@ -252,10 +280,10 @@ pub struct RpcVoteAccountStatus { #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] pub struct RpcVoteAccountInfo { - /// Vote account pubkey as base-58 encoded string + /// Vote account address, as base-58 encoded string pub vote_pubkey: String, - /// The pubkey of the node that votes using this account + /// The validator identity, as base-58 encoded string pub node_pubkey: String, /// The current stake, in lamports, delegated to this vote account @@ -290,6 +318,7 @@ pub struct RpcSignatureConfirmation { pub struct RpcSimulateTransactionResult { pub err: Option, pub logs: Option>, + pub accounts: Option>>, } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -324,7 +353,7 @@ pub enum StakeActivationState { Inactive, } -#[derive(Serialize, Deserialize, Clone, Debug)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(rename_all = "camelCase")] pub struct RpcStakeActivation { pub state: StakeActivationState, diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 624460f2cf..791bc4991c 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -3,36 +3,38 @@ //! messages to the network directly. The binary encoding of its messages are //! unstable and may change in future releases. 
-use crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response}; -use bincode::{serialize_into, serialized_size}; -use log::*; -use solana_sdk::{ - account::Account, - client::{AsyncClient, Client, SyncClient}, - clock::{Slot, MAX_PROCESSING_AGE}, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, - hash::Hash, - instruction::Instruction, - message::Message, - packet::PACKET_DATA_SIZE, - pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, - signers::Signers, - system_instruction, - timing::duration_as_ms, - transaction::{self, Transaction}, - transport::Result as TransportResult, -}; -use std::{ - io, - net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - RwLock, +use { + crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response}, + bincode::{serialize_into, serialized_size}, + log::*, + solana_sdk::{ + account::Account, + client::{AsyncClient, Client, SyncClient}, + clock::{Slot, MAX_PROCESSING_AGE}, + commitment_config::CommitmentConfig, + epoch_info::EpochInfo, + fee_calculator::{FeeCalculator, FeeRateGovernor}, + hash::Hash, + instruction::Instruction, + message::Message, + packet::PACKET_DATA_SIZE, + pubkey::Pubkey, + signature::{Keypair, Signature, Signer}, + signers::Signers, + system_instruction, + timing::duration_as_ms, + transaction::{self, Transaction}, + transport::Result as TransportResult, + }, + std::{ + io, + net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + RwLock, + }, + time::{Duration, Instant}, }, - time::{Duration, Instant}, }; struct ClientOptimizer { @@ -309,10 +311,6 @@ impl ThinClient { .map_err(|e| e.into()) } - pub fn validator_exit(&self) -> TransportResult { - self.rpc_client().validator_exit().map_err(|e| e.into()) - } - pub fn get_num_blocks_since_signature_confirmation( &mut self, sig: 
&Signature, diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs new file mode 100644 index 0000000000..ae264f9875 --- /dev/null +++ b/client/src/tpu_client.rs @@ -0,0 +1,393 @@ +use crate::{ + pubsub_client::{PubsubClient, PubsubClientError, PubsubClientSubscription}, + rpc_client::RpcClient, + rpc_response::SlotUpdate, +}; +use bincode::serialize; +use log::*; +use solana_sdk::{clock::Slot, pubkey::Pubkey, transaction::Transaction}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + net::{SocketAddr, UdpSocket}, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + thread::JoinHandle, + time::{Duration, Instant}, +}; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum TpuSenderError { + #[error("Pubsub error: {0:?}")] + PubsubError(#[from] PubsubClientError), + #[error("RPC error: {0:?}")] + RpcError(#[from] crate::client_error::ClientError), + #[error("IO error: {0:?}")] + IoError(#[from] std::io::Error), +} + +type Result = std::result::Result; + +/// Default number of slots used to build TPU socket fanout set +pub const DEFAULT_FANOUT_SLOTS: u64 = 12; + +/// Maximum number of slots used to build TPU socket fanout set +pub const MAX_FANOUT_SLOTS: u64 = 100; + +/// Config params for `TpuClient` +#[derive(Clone, Debug)] +pub struct TpuClientConfig { + /// The range of upcoming slots to include when determining which + /// leaders to send transactions to (min: 1, max: 100) + pub fanout_slots: u64, +} + +impl Default for TpuClientConfig { + fn default() -> Self { + Self { + fanout_slots: DEFAULT_FANOUT_SLOTS, + } + } +} + +/// Client which sends transactions directly to the current leader's TPU port over UDP. 
+/// The client uses RPC to determine the current leader and fetch node contact info +pub struct TpuClient { + send_socket: UdpSocket, + fanout_slots: u64, + leader_tpu_service: LeaderTpuService, + exit: Arc, +} + +impl TpuClient { + /// Serializes and sends a transaction to the current leader's TPU port + pub fn send_transaction(&self, transaction: &Transaction) -> bool { + let wire_transaction = serialize(transaction).expect("serialization should succeed"); + self.send_wire_transaction(&wire_transaction) + } + + /// Sends a transaction to the current leader's TPU port + pub fn send_wire_transaction(&self, wire_transaction: &[u8]) -> bool { + let mut sent = false; + for tpu_address in self + .leader_tpu_service + .leader_tpu_sockets(self.fanout_slots) + { + if self + .send_socket + .send_to(wire_transaction, tpu_address) + .is_ok() + { + sent = true; + } + } + sent + } + + /// Create a new client that disconnects when dropped + pub fn new( + rpc_client: Arc, + websocket_url: &str, + config: TpuClientConfig, + ) -> Result { + let exit = Arc::new(AtomicBool::new(false)); + let leader_tpu_service = LeaderTpuService::new(rpc_client, websocket_url, exit.clone())?; + + Ok(Self { + send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(), + fanout_slots: config.fanout_slots.min(MAX_FANOUT_SLOTS).max(1), + leader_tpu_service, + exit, + }) + } +} + +impl Drop for TpuClient { + fn drop(&mut self) { + self.exit.store(true, Ordering::Relaxed); + self.leader_tpu_service.join(); + } +} + +struct LeaderTpuCache { + first_slot: Slot, + leaders: Vec, + leader_tpu_map: HashMap, +} + +impl LeaderTpuCache { + fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self { + let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default(); + let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default(); + Self { + first_slot, + leaders, + leader_tpu_map, + } + } + + // Last slot that has a cached leader pubkey + fn last_slot(&self) -> Slot { + 
self.first_slot + self.leaders.len().saturating_sub(1) as u64 + } + + // Get the TPU sockets for the current leader and upcoming leaders according to fanout size + fn get_leader_sockets(&self, current_slot: Slot, fanout_slots: u64) -> Vec { + let mut leader_set = HashSet::new(); + let mut leader_sockets = Vec::new(); + for leader_slot in current_slot..current_slot + fanout_slots { + if let Some(leader) = self.get_slot_leader(leader_slot) { + if let Some(tpu_socket) = self.leader_tpu_map.get(leader) { + if leader_set.insert(*leader) { + leader_sockets.push(*tpu_socket); + } + } + } + } + leader_sockets + } + + fn get_slot_leader(&self, slot: Slot) -> Option<&Pubkey> { + if slot >= self.first_slot { + let index = slot - self.first_slot; + self.leaders.get(index as usize) + } else { + None + } + } + + fn fetch_cluster_tpu_sockets(rpc_client: &RpcClient) -> Result> { + let cluster_contact_info = rpc_client.get_cluster_nodes()?; + Ok(cluster_contact_info + .into_iter() + .filter_map(|contact_info| { + Some(( + Pubkey::from_str(&contact_info.pubkey).ok()?, + contact_info.tpu?, + )) + }) + .collect()) + } + + fn fetch_slot_leaders(rpc_client: &RpcClient, start_slot: Slot) -> Result> { + Ok(rpc_client.get_slot_leaders(start_slot, 2 * MAX_FANOUT_SLOTS)?) 
+ } +} + +// 48 chosen because it's unlikely that 12 leaders in a row will miss their slots +const MAX_SLOT_SKIP_DISTANCE: u64 = 48; + +#[derive(Clone, Debug)] +struct RecentLeaderSlots(Arc>>); +impl RecentLeaderSlots { + fn new(current_slot: Slot) -> Self { + let mut recent_slots = VecDeque::new(); + recent_slots.push_back(current_slot); + Self(Arc::new(RwLock::new(recent_slots))) + } + + fn record_slot(&self, current_slot: Slot) { + let mut recent_slots = self.0.write().unwrap(); + recent_slots.push_back(current_slot); + // 12 recent slots should be large enough to avoid a misbehaving + // validator from affecting the median recent slot + while recent_slots.len() > 12 { + recent_slots.pop_front(); + } + } + + // Estimate the current slot from recent slot notifications. + fn estimated_current_slot(&self) -> Slot { + let mut recent_slots: Vec = self.0.read().unwrap().iter().cloned().collect(); + assert!(!recent_slots.is_empty()); + recent_slots.sort_unstable(); + + // Validators can broadcast invalid blocks that are far in the future + // so check if the current slot is in line with the recent progression. + let max_index = recent_slots.len() - 1; + let median_index = max_index / 2; + let median_recent_slot = recent_slots[median_index]; + let expected_current_slot = median_recent_slot + (max_index - median_index) as u64; + let max_reasonable_current_slot = expected_current_slot + MAX_SLOT_SKIP_DISTANCE; + + // Return the highest slot that doesn't exceed what we believe is a + // reasonable slot. + recent_slots + .into_iter() + .rev() + .find(|slot| *slot <= max_reasonable_current_slot) + .unwrap() + } +} + +#[cfg(test)] +impl From> for RecentLeaderSlots { + fn from(recent_slots: Vec) -> Self { + assert!(!recent_slots.is_empty()); + Self(Arc::new(RwLock::new(recent_slots.into_iter().collect()))) + } +} + +/// Service that tracks upcoming leaders and maintains an up-to-date mapping +/// of leader id to TPU socket address. 
+struct LeaderTpuService { + recent_slots: RecentLeaderSlots, + leader_tpu_cache: Arc>, + subscription: Option>, + t_leader_tpu_service: Option>, +} + +impl LeaderTpuService { + fn new(rpc_client: Arc, websocket_url: &str, exit: Arc) -> Result { + let start_slot = rpc_client.get_max_shred_insert_slot()?; + + let recent_slots = RecentLeaderSlots::new(start_slot); + let leader_tpu_cache = Arc::new(RwLock::new(LeaderTpuCache::new(&rpc_client, start_slot))); + + let subscription = if !websocket_url.is_empty() { + let recent_slots = recent_slots.clone(); + Some(PubsubClient::slot_updates_subscribe( + websocket_url, + move |update| { + let current_slot = match update { + // This update indicates that a full slot was received by the connected + // node so we can stop sending transactions to the leader for that slot + SlotUpdate::Completed { slot, .. } => slot.saturating_add(1), + // This update indicates that we have just received the first shred from + // the leader for this slot and they are probably still accepting transactions. + SlotUpdate::FirstShredReceived { slot, .. } => slot, + _ => return, + }; + + recent_slots.record_slot(current_slot); + }, + )?) 
+ } else { + None + }; + + let t_leader_tpu_service = Some({ + let recent_slots = recent_slots.clone(); + let leader_tpu_cache = leader_tpu_cache.clone(); + std::thread::Builder::new() + .name("ldr-tpu-srv".to_string()) + .spawn(move || Self::run(rpc_client, recent_slots, leader_tpu_cache, exit)) + .unwrap() + }); + + Ok(LeaderTpuService { + recent_slots, + leader_tpu_cache, + subscription, + t_leader_tpu_service, + }) + } + + fn join(&mut self) { + if let Some(mut subscription) = self.subscription.take() { + let _ = subscription.send_unsubscribe(); + let _ = subscription.shutdown(); + } + if let Some(t_handle) = self.t_leader_tpu_service.take() { + t_handle.join().unwrap(); + } + } + + fn leader_tpu_sockets(&self, fanout_slots: u64) -> Vec { + let current_slot = self.recent_slots.estimated_current_slot(); + self.leader_tpu_cache + .read() + .unwrap() + .get_leader_sockets(current_slot, fanout_slots) + } + + fn run( + rpc_client: Arc, + recent_slots: RecentLeaderSlots, + leader_tpu_cache: Arc>, + exit: Arc, + ) { + let mut last_cluster_refresh = Instant::now(); + let mut sleep_ms = 1000; + loop { + if exit.load(Ordering::Relaxed) { + break; + } + + // Refresh cluster TPU ports every 5min in case validators restart with new port configuration + // or new validators come online + if last_cluster_refresh.elapsed() > Duration::from_secs(5 * 60) { + if let Ok(leader_tpu_map) = LeaderTpuCache::fetch_cluster_tpu_sockets(&rpc_client) { + leader_tpu_cache.write().unwrap().leader_tpu_map = leader_tpu_map; + last_cluster_refresh = Instant::now(); + } else { + sleep_ms = 100; + continue; + } + } + + // Sleep a few slots before checking if leader cache needs to be refreshed again + std::thread::sleep(Duration::from_millis(sleep_ms)); + + let current_slot = recent_slots.estimated_current_slot(); + if current_slot + >= leader_tpu_cache + .read() + .unwrap() + .last_slot() + .saturating_sub(MAX_FANOUT_SLOTS) + { + if let Ok(slot_leaders) = + 
LeaderTpuCache::fetch_slot_leaders(&rpc_client, current_slot) + { + let mut leader_tpu_cache = leader_tpu_cache.write().unwrap(); + leader_tpu_cache.first_slot = current_slot; + leader_tpu_cache.leaders = slot_leaders; + } else { + sleep_ms = 100; + continue; + } + } + + sleep_ms = 1000; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn assert_slot(recent_slots: RecentLeaderSlots, expected_slot: Slot) { + assert_eq!(recent_slots.estimated_current_slot(), expected_slot); + } + + #[test] + fn test_recent_leader_slots() { + assert_slot(RecentLeaderSlots::new(0), 0); + + let mut recent_slots: Vec = (1..=12).collect(); + assert_slot(RecentLeaderSlots::from(recent_slots.clone()), 12); + + recent_slots.reverse(); + assert_slot(RecentLeaderSlots::from(recent_slots), 12); + + assert_slot( + RecentLeaderSlots::from(vec![0, 1 + MAX_SLOT_SKIP_DISTANCE]), + 1 + MAX_SLOT_SKIP_DISTANCE, + ); + assert_slot( + RecentLeaderSlots::from(vec![0, 2 + MAX_SLOT_SKIP_DISTANCE]), + 0, + ); + + assert_slot(RecentLeaderSlots::from(vec![1]), 1); + assert_slot(RecentLeaderSlots::from(vec![1, 100]), 1); + assert_slot(RecentLeaderSlots::from(vec![1, 2, 100]), 2); + assert_slot(RecentLeaderSlots::from(vec![1, 2, 3, 100]), 3); + assert_slot(RecentLeaderSlots::from(vec![1, 2, 3, 99, 100]), 3); + } +} diff --git a/core/Cargo.toml b/core/Cargo.toml index e91262f4b1..3b08dc1406 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-core" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-core" readme = "../README.md" @@ -24,17 +24,18 @@ byteorder = "1.3.4" chrono = { version = "0.4.11", features = ["serde"] } core_affinity = "0.5.10" crossbeam-channel = "0.4" -ed25519-dalek = "=1.0.0-pre.4" +ed25519-dalek = "=1.0.1" fs_extra = "1.2.0" flate2 = "1.0" indexmap = { version = "1.5", features = ["rayon"] } itertools = "0.9.0" -jsonrpc-core = "15.0.0" 
-jsonrpc-core-client = { version = "15.0.0", features = ["ws"] } -jsonrpc-derive = "15.0.0" -jsonrpc-http-server = "15.0.0" -jsonrpc-pubsub = "15.0.0" -jsonrpc-ws-server = "15.0.0" +jsonrpc-core = "17.1.0" +jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] } +jsonrpc-derive = "17.1.0" +jsonrpc-http-server = "17.1.0" +jsonrpc-pubsub = "17.1.0" +jsonrpc-ws-server = "17.1.0" +libc = "0.2.81" log = "0.4.11" lru = "0.6.1" miow = "0.2.2" @@ -42,41 +43,47 @@ net2 = "0.2.37" num-traits = "0.2" rand = "0.7.0" rand_chacha = "0.2.2" +rand_core = "0.6.2" raptorq = "1.4.2" -rayon = "1.4.1" +rayon = "1.5.0" regex = "1.3.9" retain_mut = "0.1.2" -rustversion = "1.0.4" -serde = "1.0.118" +serde = "1.0.122" serde_bytes = "0.11" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-banks-server = { path = "../banks-server", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-perf = { path = "../perf", version = "=1.5.19" } -solana-program-test = { path = "../program-test", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } 
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.5.19" } -solana-streamer = { path = "../streamer", version = "=1.5.19" } -solana-sys-tuner = { path = "../sys-tuner", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } -solana-version = { path = "../version" } -velas-account-program = { path = "../programs/velas-account-program" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-banks-server = { path = "../banks-server", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-perf = { path = "../perf", version = "=1.6.14" } +solana-program-test = { path = "../program-test", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.6.14" } +solana-streamer = { path = "../streamer", version = "=1.6.14" } +solana-sys-tuner = { path = "../sys-tuner", version = "=1.6.14" } 
+solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] } +tempfile = "3.1.0" +thiserror = "1.0" +tokio = { version = "1", features = ["full"] } +tokio_02 = { version = "0.2", package = "tokio", features = ["full"] } +tokio-util = { version = "0.3", features = ["codec"] } # This crate needs to stay in sync with tokio_02, until that dependency can be removed +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.14" } + hex = "0.4.2" rlp = "0.5" sha3 = "0.9.1" @@ -85,15 +92,8 @@ secp256k1 = { version = "0.19.0", features = ["recovery", "global-context"] } evm-state = { path = "../evm-utils/evm-state" } evm-rpc = { path = "../evm-utils/evm-rpc" } solana-evm-loader-program = { path = "../evm-utils/programs/evm_loader" } -tempfile = "3.1.0" -thiserror = "1.0" -tokio = { version = "0.2", features = ["full"] } -tokio_01 = { version = "0.1", package = "tokio" } -tokio_01_bytes = { version = "0.4.7", package = "bytes" } -tokio_fs_01 = { version = "0.1", package = "tokio-fs" } -tokio_io_01 = { version = "0.1", package = "tokio-io" } -tokio_codec_01 = { version = "0.1", package = "tokio-codec" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.5.19" } +velas-account-program = { path = "../programs/velas-account-program" } + trees = "0.2.1" snafu = "0.6" anyhow = "1" @@ -101,9 +101,9 @@ anyhow = "1" [dev-dependencies] matches = "0.1.6" num_cpus = "1.13.0" -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } serial_test = "0.4.0" -serial_test_derive = "0.4.0" +symlink = "0.1.0" systemstat = "0.1.5" [build-dependencies] 
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 91db3e2e80..1bfc27aef9 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -66,6 +66,8 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let (exit, poh_recorder, poh_service, _signal_receiver) = create_test_recorder(&bank, &blockstore, None); + let recorder = poh_recorder.lock().unwrap().recorder(); + let tx = test_tx(); let len = 4096; let chunk_size = 1024; @@ -88,6 +90,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { &s, None::>, &BankingStageStats::default(), + &recorder, ); }); diff --git a/core/benches/crds_gossip_pull.rs b/core/benches/crds_gossip_pull.rs index 44c351f054..6d7d991342 100644 --- a/core/benches/crds_gossip_pull.rs +++ b/core/benches/crds_gossip_pull.rs @@ -29,13 +29,8 @@ fn bench_hash_as_u64(bencher: &mut Bencher) { fn bench_build_crds_filters(bencher: &mut Bencher) { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let mut rng = thread_rng(); - let mut crds_gossip_pull = CrdsGossipPull::default(); + let crds_gossip_pull = CrdsGossipPull::default(); let mut crds = Crds::default(); - for _ in 0..50_000 { - crds_gossip_pull - .purged_values - .push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen())); - } let mut num_inserts = 0; for _ in 0..90_000 { if crds diff --git a/core/benches/crds_shards.rs b/core/benches/crds_shards.rs index a020e7da3e..abeb7762a2 100644 --- a/core/benches/crds_shards.rs +++ b/core/benches/crds_shards.rs @@ -3,30 +3,34 @@ extern crate test; use rand::{thread_rng, Rng}; -use solana_core::contact_info::ContactInfo; -use solana_core::crds::VersionedCrdsValue; -use solana_core::crds_shards::CrdsShards; -use solana_core::crds_value::{CrdsData, CrdsValue}; -use solana_sdk::pubkey; +use solana_core::{ + crds::{Crds, VersionedCrdsValue}, + crds_shards::CrdsShards, + crds_value::CrdsValue, +}; use solana_sdk::timing::timestamp; +use std::iter::repeat_with; use test::Bencher; const 
CRDS_SHARDS_BITS: u32 = 8; -fn new_test_crds_value() -> VersionedCrdsValue { - let data = CrdsData::ContactInfo(ContactInfo::new_localhost(&pubkey::new_rand(), timestamp())); - VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data)) +fn new_test_crds_value(rng: &mut R) -> VersionedCrdsValue { + let value = CrdsValue::new_rand(rng, None); + let label = value.label(); + let mut crds = Crds::default(); + crds.insert(value, timestamp()).unwrap(); + crds.get(&label).cloned().unwrap() } fn bench_crds_shards_find(bencher: &mut Bencher, num_values: usize, mask_bits: u32) { - let values: Vec = std::iter::repeat_with(new_test_crds_value) + let mut rng = thread_rng(); + let values: Vec<_> = repeat_with(|| new_test_crds_value(&mut rng)) .take(num_values) .collect(); let mut shards = CrdsShards::new(CRDS_SHARDS_BITS); for (index, value) in values.iter().enumerate() { assert!(shards.insert(index, value)); } - let mut rng = thread_rng(); bencher.iter(|| { let mask = rng.gen(); let _hits = shards.find(mask, mask_bits).count(); diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index 1014d2cd1a..06cc569dc1 100644 --- a/core/benches/retransmit_stage.rs +++ b/core/benches/retransmit_stage.rs @@ -81,8 +81,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { let keypair = Arc::new(Keypair::new()); let slot = 0; let parent = 0; - let shredder = - Shredder::new(slot, parent, 0.0, keypair, 0, 0).expect("Failed to create entry shredder"); + let shredder = Shredder::new(slot, parent, keypair, 0, 0).unwrap(); let mut data_shreds = shredder.entries_to_shreds(&entries, true, 0).0; let num_packets = data_shreds.len(); diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index 2bfd080a80..39a6a4716f 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -8,8 +8,8 @@ use raptorq::{Decoder, Encoder}; use solana_ledger::entry::{create_ticks, Entry}; use solana_ledger::shred::{ max_entries_per_n_shred, max_ticks_per_n_shreds, 
ProcessShredsStats, Shred, Shredder, - MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, SHRED_PAYLOAD_SIZE, - SIZE_OF_DATA_SHRED_IGNORED_TAIL, SIZE_OF_DATA_SHRED_PAYLOAD, + MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_PAYLOAD_SIZE, SIZE_OF_DATA_SHRED_IGNORED_TAIL, + SIZE_OF_DATA_SHRED_PAYLOAD, }; use solana_perf::test_tx; use solana_sdk::hash::Hash; @@ -39,8 +39,7 @@ fn make_shreds(num_shreds: usize) -> Vec { Some(shred_size), ); let entries = make_large_unchained_entries(txs_per_entry, num_entries); - let shredder = - Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap(); + let shredder = Shredder::new(1, 0, Arc::new(Keypair::new()), 0, 0).unwrap(); let data_shreds = shredder .entries_to_data_shreds( &entries, @@ -75,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); bencher.iter(|| { - let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap(); + let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap(); shredder.entries_to_shreds(&entries, true, 0); }) } @@ -94,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { let entries = make_large_unchained_entries(txs_per_entry, num_entries); // 1Mb bencher.iter(|| { - let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap(); + let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap(); shredder.entries_to_shreds(&entries, true, 0); }) } @@ -107,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) { let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size; let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); - let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap(); + let shredder = Shredder::new(1, 0, kp, 0, 0).unwrap(); let data_shreds = 
shredder.entries_to_shreds(&entries, true, 0).0; bencher.iter(|| { let raw = &mut Shredder::deshred(&data_shreds).unwrap(); @@ -133,9 +132,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) { let data_shreds = make_shreds(symbol_count); bencher.iter(|| { Shredder::generate_coding_shreds( - RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], - symbol_count, + true, // is_last_in_slot ) .len(); }) @@ -146,18 +144,16 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; let data_shreds = make_shreds(symbol_count); let coding_shreds = Shredder::generate_coding_shreds( - RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], - symbol_count, + true, // is_last_in_slot ); bencher.iter(|| { Shredder::try_recovery( coding_shreds[..].to_vec(), symbol_count, symbol_count, - 0, - 0, - 1, + 0, // first index + 1, // slot ) .unwrap(); }) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 869ad48562..7eb69a021f 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -4,7 +4,7 @@ use crate::{ cluster_info::ClusterInfo, packet_hasher::PacketHasher, - poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry}, + poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder, WorkingBankEntry}, poh_service::{self, PohService}, }; use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError}; @@ -41,7 +41,7 @@ use solana_sdk::{ message::Message, poh_config::PohConfig, pubkey::Pubkey, - short_vec, + short_vec::decode_shortu16_len, signature::Signature, timing::{duration_as_ms, timestamp}, transaction::{self, Transaction, TransactionError}, @@ -348,6 +348,7 @@ impl BankingStage { gossip_vote_sender: &ReplayVoteSender, test_fn: Option, banking_stage_stats: &BankingStageStats, + recorder: &TransactionRecorder, ) { let mut rebuffered_packets_len = 0; let mut new_tx_count = 0; @@ -377,7 +378,7 @@ impl BankingStage { Self::process_packets_transactions( &bank, 
&bank_creation_time, - &poh_recorder, + &recorder, &msgs, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), @@ -484,6 +485,7 @@ impl BankingStage { transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, banking_stage_stats: &BankingStageStats, + recorder: &TransactionRecorder, ) -> BufferedPacketsDecision { let bank_start; let ( @@ -523,6 +525,7 @@ impl BankingStage { gossip_vote_sender, None::>, banking_stage_stats, + recorder, ); } BufferedPacketsDecision::Forward => { @@ -558,29 +561,23 @@ impl BankingStage { socket: &UdpSocket, hold: bool, ) { - if enable_forwarding { - let next_leader = poh_recorder - .lock() - .unwrap() - .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET); - next_leader.map_or((), |leader_pubkey| { - let leader_addr = { - cluster_info.lookup_contact_info(&leader_pubkey, |leader| leader.tpu_forwards) - }; + if !enable_forwarding { + if !hold { + buffered_packets.clear(); + } + return; + } - leader_addr.map_or((), |leader_addr| { - let _ = - Self::forward_buffered_packets(&socket, &leader_addr, &buffered_packets); - if hold { - buffered_packets.retain(|b| b.1.is_empty()); - for b in buffered_packets.iter_mut() { - b.2 = true; - } - } else { - buffered_packets.clear(); - } - }) - }) + let addr = match next_leader_tpu_forwards(cluster_info, poh_recorder) { + Some(addr) => addr, + None => return, + }; + let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets); + if hold { + buffered_packets.retain(|(_, index, _)| !index.is_empty()); + for (_, _, forwarded) in buffered_packets.iter_mut() { + *forwarded = true; + } } else { buffered_packets.clear(); } @@ -600,6 +597,7 @@ impl BankingStage { gossip_vote_sender: ReplayVoteSender, duplicates: &Arc, PacketHasher)>>, ) { + let recorder = poh_recorder.lock().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let mut buffered_packets = VecDeque::with_capacity(batch_limit); let banking_stage_stats = 
BankingStageStats::new(id); @@ -608,13 +606,14 @@ impl BankingStage { let decision = Self::process_buffered_packets( &my_pubkey, &socket, - poh_recorder, + &poh_recorder, cluster_info, &mut buffered_packets, enable_forwarding, transaction_status_sender.clone(), &gossip_vote_sender, &banking_stage_stats, + &recorder, ); if matches!(decision, BufferedPacketsDecision::Hold) || matches!(decision, BufferedPacketsDecision::ForwardAndHold) @@ -648,6 +647,7 @@ impl BankingStage { &mut buffered_packets, &banking_stage_stats, duplicates, + &recorder, ) { Ok(()) | Err(RecvTimeoutError::Timeout) => (), Err(RecvTimeoutError::Disconnected) => break, @@ -673,7 +673,7 @@ impl BankingStage { bank_slot: Slot, txs: impl Iterator, results: &[TransactionExecutionResult], - poh: &Arc>, + recorder: &TransactionRecorder, ) -> (Result, Vec) { let mut processed_generation = Measure::start("record::process_generation"); let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results @@ -703,10 +703,7 @@ impl BankingStage { let mut poh_record = Measure::start("record::poh_record"); // record and unlock will unlock all the successful transactions - let res = poh - .lock() - .unwrap() - .record(bank_slot, hash, processed_transactions); + let res = recorder.record(bank_slot, hash, processed_transactions); match res { Ok(()) => (), Err(PohRecorderError::MaxHeightReached) => { @@ -731,7 +728,7 @@ impl BankingStage { fn process_and_record_transactions_locked( bank: &Arc, - poh: &Arc>, + poh: &TransactionRecorder, batch: &TransactionBatch, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -741,6 +738,11 @@ impl BankingStage { // the likelihood of any single thread getting starved and processing old ids. // TODO: Banking stage threads should be prioritized to complete faster then this queue // expires. 
+ let pre_balances = if transaction_status_sender.is_some() { + bank.collect_balances(batch) + } else { + vec![] + }; let mut mint_decimals: HashMap = HashMap::new(); @@ -819,6 +821,7 @@ impl BankingStage { TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances), inner_instructions, transaction_logs, + tx_results.rent_debits, ); } } @@ -835,13 +838,18 @@ impl BankingStage { hashed_txs.len(), ); + debug!( + "process_and_record_transactions_locked: {:?}", + execute_timings + ); + (Ok(num_to_commit), retryable_txs) } pub fn process_and_record_transactions( bank: &Arc, txs: &[HashedTransaction], - poh: &Arc>, + poh: &TransactionRecorder, chunk_offset: usize, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -885,7 +893,7 @@ impl BankingStage { bank: &Arc, bank_creation_time: &Instant, transactions: &[HashedTransaction], - poh: &Arc>, + poh: &TransactionRecorder, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, ) -> (usize, Vec) { @@ -968,7 +976,7 @@ impl BankingStage { /// Read the transaction message from packet data fn packet_message(packet: &Packet) -> Option<&[u8]> { - let (sig_len, sig_size) = short_vec::decode_shortu16_len(&packet.data).ok()?; + let (sig_len, sig_size) = decode_shortu16_len(&packet.data).ok()?; let msg_start = sig_len .checked_mul(size_of::()) .and_then(|v| v.checked_add(sig_size))?; @@ -1044,7 +1052,7 @@ impl BankingStage { fn process_packets_transactions( bank: &Arc, bank_creation_time: &Instant, - poh: &Arc>, + poh: &TransactionRecorder, msgs: &Packets, packet_indexes: Vec, transaction_status_sender: Option, @@ -1177,6 +1185,7 @@ impl BankingStage { buffered_packets: &mut UnprocessedPackets, banking_stage_stats: &BankingStageStats, duplicates: &Arc, PacketHasher)>>, + recorder: &TransactionRecorder, ) -> Result<(), RecvTimeoutError> { let mut recv_time = Measure::start("process_packets_recv"); let mms = verified_receiver.recv_timeout(recv_timeout)?; @@ -1220,7 +1229,7 @@ 
impl BankingStage { Self::process_packets_transactions( &bank, &bank_creation_time, - &poh, + recorder, &msgs, packet_indexes, transaction_status_sender.clone(), @@ -1359,6 +1368,36 @@ impl BankingStage { } } +pub(crate) fn next_leader_tpu( + cluster_info: &ClusterInfo, + poh_recorder: &Mutex, +) -> Option { + if let Some(leader_pubkey) = poh_recorder + .lock() + .unwrap() + .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET) + { + cluster_info.lookup_contact_info(&leader_pubkey, |leader| leader.tpu) + } else { + None + } +} + +fn next_leader_tpu_forwards( + cluster_info: &ClusterInfo, + poh_recorder: &Arc>, +) -> Option { + if let Some(leader_pubkey) = poh_recorder + .lock() + .unwrap() + .leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET) + { + cluster_info.lookup_contact_info(&leader_pubkey, |leader| leader.tpu_forwards) + } else { + None + } +} + pub fn create_test_recorder( bank: &Arc, blockstore: &Arc, @@ -1371,7 +1410,7 @@ pub fn create_test_recorder( ) { let exit = Arc::new(AtomicBool::new(false)); let poh_config = Arc::new(poh_config.unwrap_or_default()); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -1381,6 +1420,7 @@ pub fn create_test_recorder( blockstore, &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &poh_config, + exit.clone(), ); poh_recorder.set_bank(&bank); @@ -1392,6 +1432,7 @@ pub fn create_test_recorder( bank.ticks_per_slot(), poh_service::DEFAULT_PINNED_CPU_CORE, poh_service::DEFAULT_HASHES_PER_BATCH, + record_receiver, ); (exit, poh_recorder, poh_service, entry_receiver) @@ -1401,7 +1442,7 @@ pub fn create_test_recorder( mod tests { use super::*; use crate::{ - cluster_info::Node, poh_recorder::WorkingBank, + cluster_info::Node, poh_recorder::Record, poh_recorder::WorkingBank, transaction_status_service::TransactionStatusService, }; use 
crossbeam_channel::unbounded; @@ -1422,7 +1463,12 @@ mod tests { transaction::TransactionError, }; use solana_transaction_status::TransactionWithStatusMeta; - use std::{net::SocketAddr, path::Path, sync::atomic::Ordering, thread::sleep}; + use std::{ + net::SocketAddr, + path::Path, + sync::atomic::{AtomicBool, Ordering}, + thread::sleep, + }; #[test] fn test_banking_stage_shutdown1() { @@ -1532,7 +1578,7 @@ mod tests { genesis_config, mint_keypair, .. - } = create_genesis_config(10); + } = create_slow_genesis_config(10); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); let start_hash = bank.last_blockhash(); let (verified_sender, verified_receiver) = unbounded(); @@ -1652,7 +1698,7 @@ mod tests { genesis_config, mint_keypair, .. - } = create_genesis_config(2); + } = create_slow_genesis_config(2); let (verified_sender, verified_receiver) = unbounded(); // Process a batch that includes a transaction that receives two lamports. @@ -1747,6 +1793,8 @@ mod tests { #[test] fn test_bank_record_transactions() { + solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -1764,7 +1812,8 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (poh_recorder, entry_receiver) = PohRecorder::new( + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( + // TODO use record_receiver bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -1774,9 +1823,13 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); + let recorder = poh_recorder.recorder(); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + poh_recorder.lock().unwrap().set_working_bank(working_bank); let pubkey = solana_sdk::pubkey::new_rand(); let keypair2 = Keypair::new(); @@ -1792,7 +1845,7 @@ mod tests 
{ bank.slot(), transactions.iter(), &results, - &poh_recorder, + &recorder, ); let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); assert_eq!(entry.transactions.len(), transactions.len()); @@ -1809,7 +1862,7 @@ mod tests { bank.slot(), transactions.iter(), &results, - &poh_recorder, + &recorder, ); res.unwrap(); assert!(retryable.is_empty()); @@ -1822,7 +1875,7 @@ mod tests { bank.slot(), transactions.iter(), &results, - &poh_recorder, + &recorder, ); res.unwrap(); assert!(retryable.is_empty()); @@ -1836,7 +1889,7 @@ mod tests { bank.slot() + 1, transactions.iter(), &results, - &poh_recorder, + &recorder, ); assert_matches!(res, Err(PohRecorderError::MaxHeightReached)); // The first result was an error so it's filtered out. The second result was Ok(), @@ -1844,6 +1897,13 @@ mod tests { assert_eq!(retryable, vec![1]); // Should receive nothing from PohRecorder b/c record failed assert!(entry_receiver.try_recv().is_err()); + + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); } Blockstore::destroy(&ledger_path).unwrap(); } @@ -1989,6 +2049,13 @@ mod tests { ); } + fn create_slow_genesis_config(lamports: u64) -> GenesisConfigInfo { + let mut config_info = create_genesis_config(lamports); + // For these tests there's only 1 slot, don't want to run out of ticks + config_info.genesis_config.ticks_per_slot *= 8; + config_info + } + #[test] fn test_bank_process_and_record_transactions() { solana_logger::setup(); @@ -1996,7 +2063,7 @@ mod tests { genesis_config, mint_keypair, .. 
- } = create_genesis_config(10_000); + } = create_slow_genesis_config(10_000); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); @@ -2017,7 +2084,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (poh_recorder, entry_receiver) = PohRecorder::new( + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -2027,16 +2094,20 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); + let recorder = poh_recorder.recorder(); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + poh_recorder.lock().unwrap().set_working_bank(working_bank); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); BankingStage::process_and_record_transactions( &bank, &transactions, - &poh_recorder, + &recorder, 0, None, &gossip_vote_sender, @@ -2074,7 +2145,7 @@ mod tests { BankingStage::process_and_record_transactions( &bank, &transactions, - &poh_recorder, + &recorder, 0, None, &gossip_vote_sender, @@ -2083,11 +2154,39 @@ mod tests { Err(PohRecorderError::MaxHeightReached) ); + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); + assert_eq!(bank.get_balance(&pubkey), 1); } Blockstore::destroy(&ledger_path).unwrap(); } + fn simulate_poh( + record_receiver: CrossbeamReceiver, + poh_recorder: &Arc>, + ) -> JoinHandle<()> { + let poh_recorder = poh_recorder.clone(); + let is_exited = poh_recorder.lock().unwrap().is_exited.clone(); + let tick_producer = Builder::new() + .name("solana-simulate_poh".to_string()) + .spawn(move || loop { + PohService::read_record_receiver_and_process( + &poh_recorder, + &record_receiver, + 
Duration::from_millis(10), + ); + if is_exited.load(Ordering::Relaxed) { + break; + } + }); + tick_producer.unwrap() + } + #[test] fn test_bank_process_and_record_transactions_account_in_use() { solana_logger::setup(); @@ -2095,7 +2194,7 @@ mod tests { genesis_config, mint_keypair, .. - } = create_genesis_config(10_000); + } = create_slow_genesis_config(10_000); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -2116,7 +2215,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (poh_recorder, _entry_receiver) = PohRecorder::new( + let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -2126,22 +2225,33 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); + let recorder = poh_recorder.recorder(); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); poh_recorder.lock().unwrap().set_working_bank(working_bank); + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); let (result, unprocessed) = BankingStage::process_and_record_transactions( &bank, &transactions, - &poh_recorder, + &recorder, 0, None, &gossip_vote_sender, ); + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); + assert!(result.is_ok()); assert_eq!(unprocessed.len(), 1); } @@ -2196,7 +2306,7 @@ mod tests { genesis_config, mint_keypair, .. 
- } = create_genesis_config(10_000); + } = create_slow_genesis_config(10_000); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); @@ -2211,7 +2321,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (poh_recorder, _entry_receiver) = PohRecorder::new( + let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -2221,11 +2331,14 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); - // Poh Recorder has not working bank, so should throw MaxHeightReached error on + // Poh Recorder has no working bank, so should throw MaxHeightReached error on // record - let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let recorder = poh_recorder.recorder(); + + let poh_simulator = simulate_poh(record_receiver, &Arc::new(Mutex::new(poh_recorder))); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); @@ -2234,7 +2347,7 @@ mod tests { &bank, &Instant::now(), &transactions, - &poh_recorder, + &recorder, None, &gossip_vote_sender, ); @@ -2244,6 +2357,9 @@ mod tests { retryable_txs.sort_unstable(); let expected: Vec = (0..transactions.len()).collect(); assert_eq!(retryable_txs, expected); + + recorder.is_exited.store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); } Blockstore::destroy(&ledger_path).unwrap(); @@ -2256,7 +2372,7 @@ mod tests { genesis_config, mint_keypair, .. 
- } = create_genesis_config(10_000); + } = create_slow_genesis_config(10_000); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -2290,7 +2406,7 @@ mod tests { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); let blockstore = Arc::new(blockstore); - let (poh_recorder, _entry_receiver) = PohRecorder::new( + let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -2300,9 +2416,13 @@ mod tests { &blockstore, &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); + let recorder = poh_recorder.recorder(); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + poh_recorder.lock().unwrap().set_working_bank(working_bank); let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0); @@ -2322,7 +2442,7 @@ mod tests { let _ = BankingStage::process_and_record_transactions( &bank, &transactions, - &poh_recorder, + &recorder, 0, Some(TransactionStatusSender { sender: transaction_status_sender, @@ -2355,10 +2475,18 @@ mod tests { assert_eq!(meta, None); } } + + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); } Blockstore::destroy(&ledger_path).unwrap(); } + #[allow(clippy::type_complexity)] fn setup_conflicting_transactions( ledger_path: &Path, ) -> ( @@ -2366,9 +2494,10 @@ mod tests { Arc, Arc>, Receiver, + JoinHandle<()>, ) { Blockstore::destroy(&ledger_path).unwrap(); - let genesis_config_info = create_genesis_config(10_000); + let genesis_config_info = create_slow_genesis_config(10_000); let GenesisConfigInfo { genesis_config, mint_keypair, @@ -2377,7 +2506,8 @@ mod tests { let blockstore = 
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); - let (poh_recorder, entry_receiver) = PohRecorder::new( + let exit = Arc::new(AtomicBool::default()); + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), bank.last_blockhash(), bank.slot(), @@ -2387,6 +2517,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + exit, ); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); @@ -2399,15 +2530,24 @@ mod tests { system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()), system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), ]; - (transactions, bank, poh_recorder, entry_receiver) + let poh_simulator = simulate_poh(record_receiver, &poh_recorder); + + ( + transactions, + bank, + poh_recorder, + entry_receiver, + poh_simulator, + ) } #[test] fn test_consume_buffered_packets() { let ledger_path = get_tmp_ledger_path!(); { - let (transactions, bank, poh_recorder, _entry_receiver) = + let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = setup_conflicting_transactions(&ledger_path); + let recorder = poh_recorder.lock().unwrap().recorder(); let num_conflicting_transactions = transactions.len(); let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions); assert_eq!(packets_vec.len(), 1); @@ -2435,6 +2575,7 @@ mod tests { &gossip_vote_sender, None::>, &BankingStageStats::default(), + &recorder, ); assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions); // When the poh recorder has a bank, should process all non conflicting buffered packets. 
@@ -2450,6 +2591,7 @@ mod tests { &gossip_vote_sender, None::>, &BankingStageStats::default(), + &recorder, ); if num_expected_unprocessed == 0 { assert!(buffered_packets.is_empty()) @@ -2457,6 +2599,12 @@ mod tests { assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed); } } + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); } Blockstore::destroy(&ledger_path).unwrap(); } @@ -2465,7 +2613,7 @@ mod tests { fn test_consume_buffered_packets_interrupted() { let ledger_path = get_tmp_ledger_path!(); { - let (transactions, bank, poh_recorder, _entry_receiver) = + let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = setup_conflicting_transactions(&ledger_path); let num_conflicting_transactions = transactions.len(); let packets_vec = to_packets_chunked(&transactions, 1); @@ -2493,6 +2641,7 @@ mod tests { let interrupted_iteration = 1; poh_recorder.lock().unwrap().set_bank(&bank); let poh_recorder_ = poh_recorder.clone(); + let recorder = poh_recorder_.lock().unwrap().recorder(); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); // Start up thread to process the banks let t_consume = Builder::new() @@ -2507,6 +2656,7 @@ mod tests { &gossip_vote_sender, test_fn, &BankingStageStats::default(), + &recorder, ); // Check everything is correct. 
All indexes after `interrupted_iteration` @@ -2540,6 +2690,12 @@ mod tests { } t_consume.join().unwrap(); + poh_recorder + .lock() + .unwrap() + .is_exited + .store(true, Ordering::Relaxed); + let _ = poh_simulator.join(); } Blockstore::destroy(&ledger_path).unwrap(); } diff --git a/core/src/bigtable_upload_service.rs b/core/src/bigtable_upload_service.rs index 4ece7f0c3c..4b928c18c3 100644 --- a/core/src/bigtable_upload_service.rs +++ b/core/src/bigtable_upload_service.rs @@ -5,13 +5,13 @@ use std::{ sync::{Arc, RwLock}, thread::{self, Builder, JoinHandle}, }; -use tokio::runtime; +use tokio::runtime::Runtime; // Delay uploading the largest confirmed root for this many slots. This is done in an attempt to -// ensure that the `CacheBlockTimeService` has had enough time to add the block time for the root +// ensure that the `CacheBlockMetaService` has had enough time to add the block time for the root // before it's uploaded to BigTable. // -// A more direct connection between CacheBlockTimeService and BigTableUploadService would be +// A more direct connection between CacheBlockMetaService and BigTableUploadService would be // preferable... 
const LARGEST_CONFIRMED_ROOT_UPLOAD_DELAY: usize = 100; @@ -21,7 +21,7 @@ pub struct BigTableUploadService { impl BigTableUploadService { pub fn new( - runtime_handle: runtime::Handle, + runtime: Arc, bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage, blockstore: Arc, block_commitment_cache: Arc>, @@ -32,7 +32,7 @@ impl BigTableUploadService { .name("bigtable-upload".to_string()) .spawn(move || { Self::run( - runtime_handle, + runtime, bigtable_ledger_storage, blockstore, block_commitment_cache, @@ -45,7 +45,7 @@ impl BigTableUploadService { } fn run( - runtime: runtime::Handle, + runtime: Arc, bigtable_ledger_storage: solana_storage_bigtable::LedgerStorage, blockstore: Arc, block_commitment_cache: Arc>, diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 99b26cbc07..f08ac8b826 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -447,7 +447,7 @@ pub mod test { entry::create_ticks, genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, - shred::{max_ticks_per_n_shreds, ProcessShredsStats, Shredder, RECOMMENDED_FEC_RATE}, + shred::{max_ticks_per_n_shreds, ProcessShredsStats, Shredder}, }; use solana_runtime::bank::Bank; use solana_sdk::{ @@ -476,7 +476,7 @@ pub mod test { let coding_shreds = Shredder::data_shreds_to_coding_shreds( &keypair, &data_shreds[0..], - RECOMMENDED_FEC_RATE, + true, // is_last_in_slot &mut ProcessShredsStats::default(), ) .unwrap(); @@ -669,8 +669,6 @@ pub mod test { } } - sleep(Duration::from_millis(2000)); - trace!( "[broadcast_ledger] max_tick_height: {}, start_tick_height: {}, ticks_per_slot: {}", max_tick_height, @@ -678,10 +676,17 @@ pub mod test { ticks_per_slot, ); - let blockstore = broadcast_service.blockstore; - let entries = blockstore - .get_slot_entries(slot, 0) - .expect("Expect entries to be present"); + let mut entries = vec![]; + for _ in 0..10 { + entries = broadcast_service + .blockstore + .get_slot_entries(slot, 0) + .expect("Expect 
entries to be present"); + if entries.len() >= max_tick_height as usize { + break; + } + sleep(Duration::from_millis(1000)); + } assert_eq!(entries.len(), max_tick_height as usize); drop(entry_sender); diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index ce512c6a04..9173be7ded 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -1,6 +1,6 @@ use super::*; use solana_ledger::entry::Entry; -use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE}; +use solana_ledger::shred::Shredder; use solana_sdk::hash::Hash; use solana_sdk::signature::Keypair; @@ -47,7 +47,6 @@ impl BroadcastRun for BroadcastFakeShredsRun { let shredder = Shredder::new( bank.slot(), bank.parent().unwrap().slot(), - RECOMMENDED_FEC_RATE, self.keypair.clone(), (bank.tick_height() % bank.ticks_per_slot()) as u8, self.shred_version, diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 17d34e99a1..b66681786d 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -71,7 +71,6 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { let shredder = Shredder::new( bank.slot(), bank.parent().unwrap().slot(), - 0.0, self.keypair.clone(), (bank.tick_height() % bank.ticks_per_slot()) as u8, self.shred_version, diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index f642de2531..6c0bb12482 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -8,7 +8,7 @@ use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo; use solana_ledger::{ entry::Entry, shred::{ - ProcessShredsStats, Shred, Shredder, 
MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, + ProcessShredsStats, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_TICK_REFERENCE_MASK, }, }; @@ -121,7 +121,6 @@ impl StandardBroadcastRun { let (data_shreds, next_shred_index) = Shredder::new( slot, parent_slot, - RECOMMENDED_FEC_RATE, self.keypair.clone(), reference_tick, self.shred_version, @@ -261,7 +260,6 @@ impl StandardBroadcastRun { num_expected_batches, slot_start_ts: self .slot_broadcast_start - .clone() .expect("Start timestamp must exist for a slot if we're broadcasting the slot"), }); get_leader_schedule_time.stop(); @@ -452,8 +450,7 @@ fn make_coding_shreds( .collect() } }; - Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, RECOMMENDED_FEC_RATE, stats) - .unwrap() + Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap() } impl BroadcastRun for StandardBroadcastRun { diff --git a/core/src/cache_block_meta_service.rs b/core/src/cache_block_meta_service.rs new file mode 100644 index 0000000000..98069f253a --- /dev/null +++ b/core/src/cache_block_meta_service.rs @@ -0,0 +1,74 @@ +pub use solana_ledger::blockstore_processor::CacheBlockMetaSender; +use { + crossbeam_channel::{Receiver, RecvTimeoutError}, + solana_ledger::blockstore::Blockstore, + solana_measure::measure::Measure, + solana_runtime::bank::Bank, + std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::{self, Builder, JoinHandle}, + time::Duration, + }, +}; + +pub type CacheBlockMetaReceiver = Receiver>; + +pub struct CacheBlockMetaService { + thread_hdl: JoinHandle<()>, +} + +const CACHE_BLOCK_TIME_WARNING_MS: u64 = 150; + +impl CacheBlockMetaService { + #[allow(clippy::new_ret_no_self)] + pub fn new( + cache_block_meta_receiver: CacheBlockMetaReceiver, + blockstore: Arc, + exit: &Arc, + ) -> Self { + let exit = exit.clone(); + let thread_hdl = Builder::new() + .name("solana-cache-block-time".to_string()) + .spawn(move || loop { + if exit.load(Ordering::Relaxed) { + 
break; + } + let recv_result = cache_block_meta_receiver.recv_timeout(Duration::from_secs(1)); + match recv_result { + Err(RecvTimeoutError::Disconnected) => { + break; + } + Ok(bank) => { + let mut cache_block_meta_timer = Measure::start("cache_block_meta_timer"); + Self::cache_block_meta(bank, &blockstore); + cache_block_meta_timer.stop(); + if cache_block_meta_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS { + warn!( + "cache_block_meta operation took: {}ms", + cache_block_meta_timer.as_ms() + ); + } + } + _ => {} + } + }) + .unwrap(); + Self { thread_hdl } + } + + fn cache_block_meta(bank: Arc, blockstore: &Arc) { + if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) { + error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e); + } + if let Err(e) = blockstore.cache_block_height(bank.slot(), bank.block_height()) { + error!("cache_block_height failed: slot {:?} {:?}", bank.slot(), e); + } + } + + pub fn join(self) -> thread::Result<()> { + self.thread_hdl.join() + } +} diff --git a/core/src/cache_block_time_service.rs b/core/src/cache_block_time_service.rs deleted file mode 100644 index 9959b11f46..0000000000 --- a/core/src/cache_block_time_service.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crossbeam_channel::{Receiver, RecvTimeoutError, Sender}; -use solana_ledger::blockstore::Blockstore; -use solana_measure::measure::Measure; -use solana_runtime::bank::Bank; -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread::{self, Builder, JoinHandle}, - time::Duration, -}; - -pub type CacheBlockTimeReceiver = Receiver>; -pub type CacheBlockTimeSender = Sender>; - -pub struct CacheBlockTimeService { - thread_hdl: JoinHandle<()>, -} - -const CACHE_BLOCK_TIME_WARNING_MS: u64 = 150; - -impl CacheBlockTimeService { - #[allow(clippy::new_ret_no_self)] - pub fn new( - cache_block_time_receiver: CacheBlockTimeReceiver, - blockstore: Arc, - exit: &Arc, - ) -> Self { - let exit = exit.clone(); - let thread_hdl = 
Builder::new() - .name("solana-cache-block-time".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - let recv_result = cache_block_time_receiver.recv_timeout(Duration::from_secs(1)); - match recv_result { - Err(RecvTimeoutError::Disconnected) => { - break; - } - Ok(bank) => { - let mut cache_block_time_timer = Measure::start("cache_block_time_timer"); - Self::cache_block_time(bank, &blockstore); - cache_block_time_timer.stop(); - if cache_block_time_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS { - warn!( - "cache_block_time operation took: {}ms", - cache_block_time_timer.as_ms() - ); - } - } - _ => {} - } - }) - .unwrap(); - Self { thread_hdl } - } - - fn cache_block_time(bank: Arc, blockstore: &Arc) { - if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) { - error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e); - } - } - - pub fn join(self) -> thread::Result<()> { - self.thread_hdl.join() - } -} diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index d8b719487d..a89a8c95b0 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -13,7 +13,9 @@ //! //! 
Bank needs to provide an interface for us to query the stake weight use crate::{ + cluster_info_metrics::{submit_gossip_stats, Counter, GossipStats, ScopedTimer}, contact_info::ContactInfo, + crds::Cursor, crds_gossip::CrdsGossip, crds_gossip_error::CrdsGossipError, crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}, @@ -27,9 +29,7 @@ use crate::{ result::{Error, Result}, weighted_shuffle::weighted_shuffle, }; -use rand::distributions::{Distribution, WeightedIndex}; -use rand::{CryptoRng, Rng, SeedableRng}; -use rand_chacha::ChaChaRng; +use rand::{seq::SliceRandom, CryptoRng, Rng}; use solana_ledger::shred::Shred; use solana_sdk::sanitize::{Sanitize, SanitizeError}; @@ -70,11 +70,14 @@ use std::{ fmt::Debug, fs::{self, File}, io::BufReader, + iter::repeat, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket}, - ops::{Deref, DerefMut}, + ops::{Deref, DerefMut, Div}, path::{Path, PathBuf}, - sync::atomic::{AtomicBool, AtomicU64, Ordering}, - sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, + sync::{ + atomic::{AtomicBool, Ordering}, + {Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}, + }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }; @@ -108,14 +111,19 @@ pub const MAX_SNAPSHOT_HASHES: usize = 16; const MAX_PRUNE_DATA_NODES: usize = 32; /// Number of bytes in the randomly generated token sent with ping messages. const GOSSIP_PING_TOKEN_SIZE: usize = 32; -const GOSSIP_PING_CACHE_CAPACITY: usize = 16384; -const GOSSIP_PING_CACHE_TTL: Duration = Duration::from_secs(640); +const GOSSIP_PING_CACHE_CAPACITY: usize = 65536; +const GOSSIP_PING_CACHE_TTL: Duration = Duration::from_secs(1280); pub const DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS: u64 = 10_000; pub const DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS: u64 = 60_000; /// Minimum serialized size of a Protocol::PullResponse packet. const PULL_RESPONSE_MIN_SERIALIZED_SIZE: usize = 161; // Limit number of unique pubkeys in the crds table. 
pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 4096; +/// Minimum stake that a node should have so that its CRDS values are +/// propagated through gossip (few types are exempted). +const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_VLX; +/// Minimum number of staked nodes for enforcing stakes in gossip. +const MIN_NUM_STAKED_NODES: usize = 500; #[derive(Debug, PartialEq, Eq)] pub enum ClusterInfoError { @@ -197,110 +205,6 @@ impl<'a> Drop for GossipReadLock<'a> { } } -#[derive(Default)] -struct Counter(AtomicU64); - -impl Counter { - fn add_measure(&self, x: &mut Measure) { - x.stop(); - self.0.fetch_add(x.as_us(), Ordering::Relaxed); - } - fn add_relaxed(&self, x: u64) { - self.0.fetch_add(x, Ordering::Relaxed); - } - fn clear(&self) -> u64 { - self.0.swap(0, Ordering::Relaxed) - } -} - -struct ScopedTimer<'a> { - clock: Instant, - metric: &'a AtomicU64, -} - -impl<'a> From<&'a Counter> for ScopedTimer<'a> { - // Output should be assigned to a *named* variable, - // otherwise it is immediately dropped. 
- #[must_use] - fn from(counter: &'a Counter) -> Self { - Self { - clock: Instant::now(), - metric: &counter.0, - } - } -} - -impl Drop for ScopedTimer<'_> { - fn drop(&mut self) { - let micros = self.clock.elapsed().as_micros(); - self.metric.fetch_add(micros as u64, Ordering::Relaxed); - } -} - -#[derive(Default)] -struct GossipStats { - entrypoint: Counter, - entrypoint2: Counter, - gossip_packets_dropped_count: Counter, - push_vote_read: Counter, - get_votes: Counter, - get_accounts_hash: Counter, - all_tvu_peers: Counter, - tvu_peers: Counter, - repair_peers: Counter, - new_push_requests: Counter, - new_push_requests2: Counter, - new_push_requests_num: Counter, - filter_pull_response: Counter, - handle_batch_ping_messages_time: Counter, - handle_batch_pong_messages_time: Counter, - handle_batch_prune_messages_time: Counter, - handle_batch_pull_requests_time: Counter, - handle_batch_pull_responses_time: Counter, - handle_batch_push_messages_time: Counter, - packets_received_count: Counter, - packets_received_prune_messages_count: Counter, - packets_received_pull_requests_count: Counter, - packets_received_pull_responses_count: Counter, - packets_received_push_messages_count: Counter, - packets_received_verified_count: Counter, - packets_sent_gossip_requests_count: Counter, - packets_sent_prune_messages_count: Counter, - packets_sent_pull_requests_count: Counter, - packets_sent_pull_responses_count: Counter, - packets_sent_push_messages_count: Counter, - process_gossip_packets_time: Counter, - process_pull_response: Counter, - process_pull_response_count: Counter, - process_pull_response_len: Counter, - process_pull_response_timeout: Counter, - process_pull_response_fail_insert: Counter, - process_pull_response_fail_timeout: Counter, - process_pull_response_success: Counter, - process_pull_requests: Counter, - generate_pull_responses: Counter, - process_prune: Counter, - process_push_message: Counter, - prune_received_cache: Counter, - prune_message_count: 
Counter, - prune_message_len: Counter, - pull_request_ping_pong_check_failed_count: Counter, - purge: Counter, - trim_crds_table_failed: Counter, - trim_crds_table_purged_values_count: Counter, - epoch_slots_lookup: Counter, - new_pull_requests: Counter, - new_pull_requests_count: Counter, - mark_pull_request: Counter, - skip_pull_response_shred_version: Counter, - skip_pull_shred_version: Counter, - skip_push_message_shred_version: Counter, - push_message_count: Counter, - push_message_value_count: Counter, - push_response_count: Counter, - pull_requests_count: Counter, -} - pub struct ClusterInfo { /// The network pub gossip: RwLock, @@ -310,11 +214,11 @@ pub struct ClusterInfo { entrypoints: RwLock>, outbound_budget: DataBudget, my_contact_info: RwLock, - ping_cache: RwLock, + ping_cache: Mutex, id: Pubkey, stats: GossipStats, socket: UdpSocket, - local_message_pending_push_queue: RwLock>, + local_message_pending_push_queue: Mutex>, contact_debug_interval: u64, // milliseconds, 0 = disabled contact_save_interval: u64, // milliseconds, 0 = disabled instance: NodeInstance, @@ -328,17 +232,17 @@ impl Default for ClusterInfo { } #[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)] -pub struct PruneData { +struct PruneData { /// Pubkey of the node that sent this prune data - pub pubkey: Pubkey, + pubkey: Pubkey, /// Pubkeys of nodes that should be pruned - pub prunes: Vec, + prunes: Vec, /// Signature of this Prune Message - pub signature: Signature, + signature: Signature, /// The Pubkey of the intended node/destination for this message - pub destination: Pubkey, + destination: Pubkey, /// Wallclock of the node that generated this message - pub wallclock: u64, + wallclock: u64, } impl PruneData { @@ -403,9 +307,9 @@ impl Signable for PruneData { } struct PullData { - pub from_addr: SocketAddr, - pub caller: CrdsValue, - pub filter: CrdsFilter, + from_addr: SocketAddr, + caller: CrdsValue, + filter: CrdsFilter, } pub fn make_accounts_hashes_message( 
@@ -416,7 +320,7 @@ pub fn make_accounts_hashes_message( Some(CrdsValue::new_signed(message, keypair)) } -type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; +pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; // TODO These messages should go through the gpu pipeline for spam filtering #[frozen_abi(digest = "CH5BWuhAyvUiUQYgu2Lcwu7eoiW6bQitvtLS1yFsdmrE")] @@ -525,15 +429,31 @@ impl Sanitize for Protocol { } } -// Rating for pull requests -// A response table is generated as a -// 2-d table arranged by target nodes and a -// list of responses for that node, -// to/responses_index is a location in that table. -struct ResponseScore { - to: usize, // to, index of who the response is to - responses_index: usize, // index into the list of responses for a given to - score: u64, // Relative score of the response +// Retains only CRDS values associated with nodes with enough stake. +// (some crds types are exempted) +fn retain_staked(values: &mut Vec, stakes: &HashMap) { + values.retain(|value| { + match value.data { + CrdsData::ContactInfo(_) => true, + // May Impact new validators starting up without any stake yet. + CrdsData::Vote(_, _) => true, + // Unstaked nodes can still help repair. + CrdsData::EpochSlots(_, _) => true, + // Unstaked nodes can still serve snapshots. + CrdsData::SnapshotHashes(_) => true, + // Otherwise unstaked voting nodes will show up with no version in + // the various dashboards. 
+ CrdsData::Version(_) => true, + CrdsData::NodeInstance(_) => true, + CrdsData::LowestSlot(_, _) + | CrdsData::AccountsHashes(_) + | CrdsData::LegacyVersion(_) + | CrdsData::DuplicateShred(_, _) => { + let stake = stakes.get(&value.pubkey()).copied(); + stake.unwrap_or_default() >= MIN_STAKE_FOR_GOSSIP + } + } + }) } impl ClusterInfo { @@ -550,14 +470,14 @@ impl ClusterInfo { entrypoints: RwLock::new(vec![]), outbound_budget: DataBudget::default(), my_contact_info: RwLock::new(contact_info), - ping_cache: RwLock::new(PingCache::new( + ping_cache: Mutex::new(PingCache::new( GOSSIP_PING_CACHE_TTL, GOSSIP_PING_CACHE_CAPACITY, )), id, stats: GossipStats::default(), socket: UdpSocket::bind("0.0.0.0:0").unwrap(), - local_message_pending_push_queue: RwLock::new(vec![]), + local_message_pending_push_queue: Mutex::default(), contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS, instance: NodeInstance::new(&mut thread_rng(), id, timestamp()), contact_info_path: PathBuf::default(), @@ -585,13 +505,13 @@ impl ClusterInfo { entrypoints: RwLock::new(self.entrypoints.read().unwrap().clone()), outbound_budget: self.outbound_budget.clone_non_atomic(), my_contact_info: RwLock::new(my_contact_info), - ping_cache: RwLock::new(self.ping_cache.read().unwrap().mock_clone()), + ping_cache: Mutex::new(self.ping_cache.lock().unwrap().mock_clone()), id: *new_id, stats: GossipStats::default(), socket: UdpSocket::bind("0.0.0.0:0").unwrap(), - local_message_pending_push_queue: RwLock::new( + local_message_pending_push_queue: Mutex::new( self.local_message_pending_push_queue - .read() + .lock() .unwrap() .clone(), ), @@ -620,13 +540,10 @@ impl ClusterInfo { .into_iter() .map(|v| CrdsValue::new_signed(v, &self.keypair)) .collect(); - { - let mut local_message_pending_push_queue = - self.local_message_pending_push_queue.write().unwrap(); - for entry in entries { - local_message_pending_push_queue.push((entry, now)); - } - } + self.local_message_pending_push_queue + .lock() + .unwrap() + 
.extend(entries); self.gossip .write() .unwrap() @@ -766,14 +683,10 @@ impl ClusterInfo { where F: FnOnce(&ContactInfo) -> Y, { - let entry = CrdsValueLabel::ContactInfo(*id); - self.gossip - .read() - .unwrap() - .crds - .lookup(&entry) - .and_then(CrdsValue::contact_info) - .map(map) + let label = CrdsValueLabel::ContactInfo(*id); + let gossip = self.gossip.read().unwrap(); + let entry = gossip.crds.get(&label)?; + Some(map(entry.value.contact_info()?)) } pub fn lookup_contact_info_by_gossip_addr( @@ -798,13 +711,11 @@ impl ClusterInfo { } pub fn lookup_epoch_slots(&self, ix: EpochSlotsIndex) -> EpochSlots { - let entry = CrdsValueLabel::EpochSlots(ix, self.id()); - self.gossip - .read() - .unwrap() - .crds - .lookup(&entry) - .and_then(CrdsValue::epoch_slots) + let label = CrdsValueLabel::EpochSlots(ix, self.id()); + let gossip = self.gossip.read().unwrap(); + let entry = gossip.crds.get(&label); + entry + .and_then(|v| v.value.epoch_slots()) .cloned() .unwrap_or_else(|| EpochSlots::new(self.id(), timestamp())) } @@ -967,8 +878,8 @@ impl ClusterInfo { .read() .unwrap() .crds - .lookup(&CrdsValueLabel::LowestSlot(self.id())) - .and_then(|x| x.lowest_slot()) + .get(&CrdsValueLabel::LowestSlot(self.id())) + .and_then(|x| x.value.lowest_slot()) .map(|x| x.lowest) .unwrap_or(0); if min > last { @@ -977,9 +888,9 @@ impl ClusterInfo { &self.keypair, ); self.local_message_pending_push_queue - .write() + .lock() .unwrap() - .push((entry, now)); + .push(entry); } } @@ -993,8 +904,8 @@ impl ClusterInfo { &self.stats.epoch_slots_lookup, ) .crds - .lookup(&CrdsValueLabel::EpochSlots(ix, self.id())) - .and_then(CrdsValue::epoch_slots) + .get(&CrdsValueLabel::EpochSlots(ix, self.id())) + .and_then(|v| v.value.epoch_slots()) .and_then(|x| Some((x.wallclock, x.first_slot()?)))?, ix, )) @@ -1033,9 +944,9 @@ impl ClusterInfo { if n > 0 { let entry = CrdsValue::new_signed(CrdsData::EpochSlots(ix, slots), &self.keypair); self.local_message_pending_push_queue - .write() + .lock() 
.unwrap() - .push((entry, now)); + .push(entry); } num += n; if num < update.len() { @@ -1061,12 +972,11 @@ impl ClusterInfo { GossipWriteLock::new(self.gossip.write().unwrap(), label, counter) } - pub fn push_message(&self, message: CrdsValue) { - let now = message.wallclock(); + pub(crate) fn push_message(&self, message: CrdsValue) { self.local_message_pending_push_queue - .write() + .lock() .unwrap() - .push((message, now)); + .push(message); } pub fn push_accounts_hashes(&self, accounts_hashes: Vec<(Slot, Hash)>) { @@ -1095,9 +1005,21 @@ impl ClusterInfo { self.push_message(CrdsValue::new_signed(message, &self.keypair)); } + fn push_vote_at_index(&self, vote: Transaction, vote_index: u8) { + assert!((vote_index as usize) < MAX_LOCKOUT_HISTORY); + let self_pubkey = self.id(); + let now = timestamp(); + let vote = Vote::new(self_pubkey, vote, now); + let vote = CrdsData::Vote(vote_index, vote); + let vote = CrdsValue::new_signed(vote, &self.keypair); + self.gossip + .write() + .unwrap() + .process_push_message(&self_pubkey, vec![vote], now); + } + pub fn push_vote(&self, tower: &[Slot], vote: Transaction) { debug_assert!(tower.iter().tuple_windows().all(|(a, b)| a < b)); - let now = timestamp(); // Find a crds vote which is evicted from the tower, and recycle its // vote-index. 
This can be either an old vote which is popped off the // deque, or recent vote which has expired before getting enough @@ -1126,9 +1048,9 @@ impl ClusterInfo { (0..MAX_LOCKOUT_HISTORY as u8) .filter_map(|ix| { let vote = CrdsValueLabel::Vote(ix, self_pubkey); - let vote = gossip.crds.lookup(&vote)?; + let vote = gossip.crds.get(&vote)?; num_crds_votes += 1; - match &vote.data { + match &vote.value.data { CrdsData::Vote(_, vote) if should_evict_vote(vote) => { Some((vote.wallclock, ix)) } @@ -1140,38 +1062,56 @@ impl ClusterInfo { .map(|(_ /*wallclock*/, ix)| ix) }; let vote_index = vote_index.unwrap_or(num_crds_votes); - assert!((vote_index as usize) < MAX_LOCKOUT_HISTORY); - let vote = Vote::new(self_pubkey, vote, now); - debug_assert_eq!(vote.slot().unwrap(), *tower.last().unwrap()); - let vote = CrdsData::Vote(vote_index, vote); - let vote = CrdsValue::new_signed(vote, &self.keypair); - self.gossip - .write() - .unwrap() - .process_push_message(&self_pubkey, vec![vote], now); + self.push_vote_at_index(vote, vote_index); + } + + pub fn refresh_vote(&self, vote: Transaction, vote_slot: Slot) { + let vote_index = { + let gossip = + self.time_gossip_read_lock("gossip_read_push_vote", &self.stats.push_vote_read); + (0..MAX_LOCKOUT_HISTORY as u8).find(|ix| { + let vote = CrdsValueLabel::Vote(*ix, self.id()); + if let Some(vote) = gossip.crds.get(&vote) { + match &vote.value.data { + CrdsData::Vote(_, prev_vote) => match prev_vote.slot() { + Some(prev_vote_slot) => prev_vote_slot == vote_slot, + None => { + error!("crds vote with no slots!"); + false + } + }, + _ => panic!("this should not happen!"), + } + } else { + false + } + }) + }; + + // If you don't see a vote with the same slot yet, this means you probably + // restarted, and need to wait for your oldest vote to propagate back to you. + // + // We don't write to an arbitrary index, because it may replace one of this validator's + // existing votes on the network. 
+ if let Some(vote_index) = vote_index { + self.push_vote_at_index(vote, vote_index); + } } - pub fn send_vote(&self, vote: &Transaction) -> Result<()> { - let tpu = self.my_contact_info().tpu; + pub fn send_vote(&self, vote: &Transaction, tpu: Option) -> Result<()> { + let tpu = tpu.unwrap_or_else(|| self.my_contact_info().tpu); let buf = serialize(vote)?; self.socket.send_to(&buf, &tpu)?; Ok(()) } - /// Get votes in the crds - /// * since - The timestamp of when the vote inserted must be greater than - /// since. This allows the bank to query for new votes only. - /// - /// * return - The votes, and the max timestamp from the new set. - pub fn get_votes(&self, since: u64) -> (Vec, Vec, u64) { - let mut max_ts = since; - let (labels, txs): (Vec, Vec) = self + /// Returns votes inserted since the given cursor. + pub fn get_votes(&self, cursor: &mut Cursor) -> (Vec, Vec) { + let (labels, txs): (_, Vec<_>) = self .time_gossip_read_lock("get_votes", &self.stats.get_votes) .crds - .get_votes() - .filter(|vote| vote.insert_timestamp > since) + .get_votes(cursor) .map(|vote| { - max_ts = std::cmp::max(vote.insert_timestamp, max_ts); let transaction = match &vote.value.data { CrdsData::Vote(_, vote) => vote.transaction().clone(), _ => panic!("this should not happen!"), @@ -1180,7 +1120,7 @@ impl ClusterInfo { }) .unzip(); inc_new_counter_info!("cluster_info-get_votes-count", txs.len()); - (labels, txs, max_ts) + (labels, txs) } pub(crate) fn push_duplicate_shred(&self, shred: &Shred, other_payload: &[u8]) -> Result<()> { @@ -1218,52 +1158,15 @@ impl ClusterInfo { .map(map) } - pub fn get_lowest_slot_for_node( - &self, - pubkey: &Pubkey, - since: Option, - map: F, - ) -> Option - where - F: FnOnce(&LowestSlot, u64) -> Y, - { - self.gossip - .read() - .unwrap() - .crds - .get(&CrdsValueLabel::LowestSlot(*pubkey)) - .filter(|x| { - since - .map(|since| x.insert_timestamp > since) - .unwrap_or(true) + pub(crate) fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec { + let 
gossip = self.gossip.read().unwrap(); + let entries = gossip.crds.get_epoch_slots(cursor); + entries + .map(|entry| match &entry.value.data { + CrdsData::EpochSlots(_, slots) => slots.clone(), + _ => panic!("this should not happen!"), }) - .map(|x| map(x.value.lowest_slot().unwrap(), x.insert_timestamp)) - } - - pub fn get_epoch_slots_since( - &self, - timestamp: u64, - ) -> ( - Vec, - Option, // Most recent insert timestmap. - ) { - let mut max_ts = 0; - let vals: Vec<_> = self - .gossip - .read() - .unwrap() - .crds - .get_epoch_slots_since(timestamp) - .map(|value| { - max_ts = std::cmp::max(max_ts, value.insert_timestamp); - match &value.value.data { - CrdsData::EpochSlots(_, slots) => slots.clone(), - _ => panic!("this should not happen!"), - } - }) - .collect(); - let max_ts = if vals.is_empty() { None } else { Some(max_ts) }; - (vals, max_ts) + .collect() } pub fn get_node_version(&self, pubkey: &Pubkey) -> Option { @@ -1474,7 +1377,7 @@ impl ClusterInfo { /// We need to avoid having obj locked while doing a io, such as the `send_to` pub fn retransmit_to( peers: &[&ContactInfo], - packet: &mut Packet, + packet: &Packet, s: &UdpSocket, forwarded: bool, ) -> Result<()> { @@ -1516,52 +1419,43 @@ impl ClusterInfo { fn append_entrypoint_to_pulls( &self, thread_pool: &ThreadPool, - pulls: &mut Vec<(Pubkey, CrdsFilter, SocketAddr, CrdsValue)>, + pulls: &mut Vec<(ContactInfo, Vec)>, ) { - let entrypoint_id_and_gossip = { + const THROTTLE_DELAY: u64 = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2; + let entrypoint = { let mut entrypoints = self.entrypoints.write().unwrap(); - if entrypoints.is_empty() { - None - } else { - let i = thread_rng().gen_range(0, entrypoints.len()); - let entrypoint = &mut entrypoints[i]; - - if pulls.is_empty() { - // Nobody else to pull from, try an entrypoint - Some((entrypoint.id, entrypoint.gossip)) - } else { - let now = timestamp(); - if now - entrypoint.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 { - None - } else { - 
entrypoint.wallclock = now; - if self - .time_gossip_read_lock("entrypoint", &self.stats.entrypoint) - .crds - .get_nodes_contact_info() - .any(|node| node.gossip == entrypoint.gossip) - { - None // Found the entrypoint, no need to pull from it - } else { - Some((entrypoint.id, entrypoint.gossip)) - } - } + let entrypoint = match entrypoints.choose_mut(&mut rand::thread_rng()) { + Some(entrypoint) => entrypoint, + None => return, + }; + if !pulls.is_empty() { + let now = timestamp(); + if now <= entrypoint.wallclock.saturating_add(THROTTLE_DELAY) { + return; + } + entrypoint.wallclock = now; + if self + .time_gossip_read_lock("entrypoint", &self.stats.entrypoint) + .crds + .get_nodes_contact_info() + .any(|node| node.gossip == entrypoint.gossip) + { + return; // Found the entrypoint, no need to pull from it } } + entrypoint.clone() }; - - if let Some((id, gossip)) = entrypoint_id_and_gossip { - let r_gossip = self.time_gossip_read_lock("entrypoint", &self.stats.entrypoint2); - let self_info = r_gossip - .crds - .lookup(&CrdsValueLabel::ContactInfo(self.id())) - .unwrap_or_else(|| panic!("self_id invalid {}", self.id())); - r_gossip - .pull - .build_crds_filters(thread_pool, &r_gossip.crds, MAX_BLOOM_SIZE) - .into_iter() - .for_each(|filter| pulls.push((id, filter, gossip, self_info.clone()))); - } + let filters = match pulls.first() { + Some((_, filters)) => filters.clone(), + None => { + let gossip = self.time_gossip_read_lock("entrypoint", &self.stats.entrypoint2); + gossip + .pull + .build_crds_filters(thread_pool, &gossip.crds, MAX_BLOOM_SIZE) + } + }; + self.stats.pull_from_entrypoint_count.add_relaxed(1); + pulls.push((entrypoint, filters)); } /// Splits an input feed of serializable data into chunks where the sum of @@ -1614,71 +1508,84 @@ impl ClusterInfo { }) } + #[allow(clippy::type_complexity)] fn new_pull_requests( &self, thread_pool: &ThreadPool, gossip_validators: Option<&HashSet>, stakes: &HashMap, - ) -> Vec<(SocketAddr, Protocol)> { + ) -> ( + 
Vec<(SocketAddr, Ping)>, // Ping packets. + Vec<(SocketAddr, Protocol)>, // Pull requests + ) { let now = timestamp(); + let mut pings = Vec::new(); let mut pulls: Vec<_> = { - let r_gossip = - self.time_gossip_read_lock("new_pull_reqs", &self.stats.new_pull_requests); - r_gossip - .new_pull_request(thread_pool, now, gossip_validators, stakes, MAX_BLOOM_SIZE) - .ok() - .into_iter() - .filter_map(|(peer, filters, me)| { - let peer_label = CrdsValueLabel::ContactInfo(peer); - r_gossip - .crds - .lookup(&peer_label) - .and_then(CrdsValue::contact_info) - .map(move |peer_info| { - filters - .into_iter() - .map(move |f| (peer, f, peer_info.gossip, me.clone())) - }) - }) - .flatten() - .collect() + let gossip = self.time_gossip_read_lock("new_pull_reqs", &self.stats.new_pull_requests); + match gossip.new_pull_request( + thread_pool, + self.keypair.deref(), + now, + gossip_validators, + stakes, + MAX_BLOOM_SIZE, + &self.ping_cache, + &mut pings, + ) { + Err(_) => Vec::default(), + Ok((peer, filters)) => vec![(peer, filters)], + } }; self.append_entrypoint_to_pulls(thread_pool, &mut pulls); - self.stats - .new_pull_requests_count - .add_relaxed(pulls.len() as u64); - // There are at most 2 unique peers here: The randomly - // selected pull peer, and possibly also the entrypoint. 
- let peers: Vec = pulls.iter().map(|(peer, _, _, _)| *peer).dedup().collect(); + let num_requests = pulls.iter().map(|(_, filters)| filters.len() as u64).sum(); + self.stats.new_pull_requests_count.add_relaxed(num_requests); { let mut gossip = self.time_gossip_write_lock("mark_pull", &self.stats.mark_pull_request); - for peer in peers { - gossip.mark_pull_request_creation_time(&peer, now); + for (peer, _) in &pulls { + gossip.mark_pull_request_creation_time(peer.id, now); } } - pulls + let self_info = CrdsData::ContactInfo(self.my_contact_info()); + let self_info = CrdsValue::new_signed(self_info, &self.keypair); + let pulls = pulls .into_iter() - .map(|(_, filter, gossip, self_info)| { - (gossip, Protocol::PullRequest(filter, self_info)) - }) - .collect() + .flat_map(|(peer, filters)| repeat(peer.gossip).zip(filters)) + .map(|(gossip_addr, filter)| { + let request = Protocol::PullRequest(filter, self_info.clone()); + (gossip_addr, request) + }); + self.stats + .new_pull_requests_pings_count + .add_relaxed(pings.len() as u64); + (pings, pulls.collect()) } - fn drain_push_queue(&self) -> Vec<(CrdsValue, u64)> { - let mut push_queue = self.local_message_pending_push_queue.write().unwrap(); + + fn drain_push_queue(&self) -> Vec { + let mut push_queue = self.local_message_pending_push_queue.lock().unwrap(); std::mem::take(&mut *push_queue) } #[cfg(test)] pub fn flush_push_queue(&self) { let pending_push_messages = self.drain_push_queue(); let mut gossip = self.gossip.write().unwrap(); - gossip.process_push_messages(pending_push_messages); + gossip.process_push_message(&self.id, pending_push_messages, timestamp()); } - fn new_push_requests(&self) -> Vec<(SocketAddr, Protocol)> { + fn new_push_requests( + &self, + stakes: &HashMap, + require_stake_for_gossip: bool, + ) -> Vec<(SocketAddr, Protocol)> { let self_id = self.id(); - let (_, push_messages) = self + let mut push_messages = self .time_gossip_write_lock("new_push_requests", &self.stats.new_push_requests) 
.new_push_messages(self.drain_push_queue(), timestamp()); + if require_stake_for_gossip { + push_messages.retain(|_, data| { + retain_staked(data, stakes); + !data.is_empty() + }) + } let push_messages: Vec<_> = { let gossip = self.time_gossip_read_lock("push_req_lookup", &self.stats.new_push_requests2); @@ -1710,22 +1617,30 @@ impl ClusterInfo { gossip_validators: Option<&HashSet>, stakes: &HashMap, generate_pull_requests: bool, + require_stake_for_gossip: bool, ) -> Vec<(SocketAddr, Protocol)> { self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes); - let mut pulls: Vec<_> = if generate_pull_requests { - self.new_pull_requests(&thread_pool, gossip_validators, stakes) - } else { - vec![] - }; - let mut pushes: Vec<_> = self.new_push_requests(); - self.stats - .packets_sent_pull_requests_count - .add_relaxed(pulls.len() as u64); + // This will flush local pending push messages before generating + // pull-request bloom filters, preventing pull responses to return the + // same values back to the node itself. Note that packets will arrive + // and are processed out of order. 
+ let mut out: Vec<_> = self.new_push_requests(stakes, require_stake_for_gossip); self.stats .packets_sent_push_messages_count - .add_relaxed(pushes.len() as u64); - pulls.append(&mut pushes); - pulls + .add_relaxed(out.len() as u64); + if generate_pull_requests { + let (pings, pull_requests) = + self.new_pull_requests(&thread_pool, gossip_validators, stakes); + self.stats + .packets_sent_pull_requests_count + .add_relaxed(pull_requests.len() as u64); + let pings = pings + .into_iter() + .map(|(addr, ping)| (addr, Protocol::PingMessage(ping))); + out.extend(pull_requests); + out.extend(pings); + } + out } /// At random pick a node and try to get updated changes from them @@ -1737,12 +1652,14 @@ impl ClusterInfo { stakes: &HashMap, sender: &PacketSender, generate_pull_requests: bool, + require_stake_for_gossip: bool, ) -> Result<()> { let reqs = self.generate_new_gossip_requests( thread_pool, gossip_validators, - &stakes, + stakes, generate_pull_requests, + require_stake_for_gossip, ); if !reqs.is_empty() { let packets = to_packets_with_destination(recycler.clone(), &reqs); @@ -1754,18 +1671,12 @@ impl ClusterInfo { Ok(()) } - fn process_entrypoints(&self, entrypoints_processed: &mut bool) { - if *entrypoints_processed { - return; - } - + fn process_entrypoints(&self) -> bool { let mut entrypoints = self.entrypoints.write().unwrap(); if entrypoints.is_empty() { // No entrypoint specified. 
Nothing more to process - *entrypoints_processed = true; - return; + return true; } - for entrypoint in entrypoints.iter_mut() { if entrypoint.id == Pubkey::default() { // If a pull from the entrypoint was successful it should exist in the CRDS table @@ -1794,31 +1705,23 @@ impl ClusterInfo { .set_shred_version(entrypoint.shred_version); } } - - *entrypoints_processed = self.my_shred_version() != 0 + self.my_shred_version() != 0 && entrypoints .iter() - .all(|entrypoint| entrypoint.id != Pubkey::default()); + .all(|entrypoint| entrypoint.id != Pubkey::default()) } fn handle_purge( &self, thread_pool: &ThreadPool, - bank_forks: &Option>>, + bank_forks: Option<&RwLock>, stakes: &HashMap, ) { - let timeout = { - if let Some(ref bank_forks) = bank_forks { - let bank = bank_forks.read().unwrap().working_bank(); - let epoch = bank.epoch(); - let epoch_schedule = bank.epoch_schedule(); - epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT - } else { - inc_new_counter_info!("cluster_info-purge-no_working_bank", 1); - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - } + let epoch_duration = get_epoch_duration(bank_forks); + let timeouts = { + let gossip = self.gossip.read().unwrap(); + gossip.make_timeouts(stakes, epoch_duration) }; - let timeouts = self.gossip.read().unwrap().make_timeouts(stakes, timeout); let num_purged = self .time_gossip_write_lock("purge", &self.stats.purge) .purge(thread_pool, timestamp(), &timeouts); @@ -1840,20 +1743,15 @@ impl ClusterInfo { .chain(std::iter::once(self.id)) .collect(); let mut gossip = self.gossip.write().unwrap(); - match gossip.crds.trim(cap, &keep, stakes) { + match gossip.crds.trim(cap, &keep, stakes, timestamp()) { Err(err) => { self.stats.trim_crds_table_failed.add_relaxed(1); error!("crds table trim failed: {:?}", err); } - Ok(purged_values) => { + Ok(num_purged) => { self.stats .trim_crds_table_purged_values_count - .add_relaxed(purged_values.len() as u64); - gossip.pull.purged_values.extend( - purged_values - .into_iter() - 
.map(|v| (v.value_hash, v.local_timestamp)), - ); + .add_relaxed(num_purged as u64); } } } @@ -1911,13 +1809,18 @@ impl ClusterInfo { last_contact_info_save = start; } - let stakes: HashMap<_, _> = match bank_forks { + let (stakes, feature_set) = match bank_forks { Some(ref bank_forks) => { - bank_forks.read().unwrap().root_bank().staked_nodes() + let root_bank = bank_forks.read().unwrap().root_bank(); + ( + root_bank.staked_nodes(), + Some(root_bank.feature_set.clone()), + ) } - None => HashMap::new(), + None => (HashMap::new(), None), }; - + let require_stake_for_gossip = + self.require_stake_for_gossip(feature_set.as_deref(), &stakes); let _ = self.run_gossip( &thread_pool, gossip_validators.as_ref(), @@ -1925,15 +1828,13 @@ impl ClusterInfo { &stakes, &sender, generate_pull_requests, + require_stake_for_gossip, ); if exit.load(Ordering::Relaxed) { return; } - - self.handle_purge(&thread_pool, &bank_forks, &stakes); - - self.process_entrypoints(&mut entrypoints_processed); - + self.handle_purge(&thread_pool, bank_forks.as_deref(), &stakes); + entrypoints_processed = entrypoints_processed || self.process_entrypoints(); //TODO: possibly tune this parameter //we saw a deadlock passing an self.read().unwrap().timeout into sleep if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 { @@ -2004,7 +1905,7 @@ impl ClusterInfo { recycler: &PacketsRecycler, stakes: &HashMap, response_sender: &PacketSender, - feature_set: Option<&FeatureSet>, + require_stake_for_gossip: bool, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_pull_requests_time); if requests.is_empty() { @@ -2046,7 +1947,8 @@ impl ClusterInfo { self.stats .pull_requests_count .add_relaxed(requests.len() as u64); - let response = self.handle_pull_requests(recycler, requests, stakes, feature_set); + let response = + self.handle_pull_requests(recycler, requests, stakes, require_stake_for_gossip); if !response.is_empty() { self.stats .packets_sent_pull_responses_count @@ -2078,16 +1980,13 @@ impl 
ClusterInfo { now: Instant, mut rng: &'a mut R, packets: &'a mut Packets, - feature_set: Option<&FeatureSet>, ) -> impl FnMut(&PullData) -> bool + 'a where R: Rng + CryptoRng, { - let check_enabled = matches!(feature_set, Some(feature_set) if - feature_set.is_active(&feature_set::pull_request_ping_pong_check::id())); let mut cache = HashMap::<(Pubkey, SocketAddr), bool>::new(); let mut pingf = move || Ping::new_rand(&mut rng, &self.keypair).ok(); - let mut ping_cache = self.ping_cache.write().unwrap(); + let mut ping_cache = self.ping_cache.lock().unwrap(); let mut hard_check = move |node| { let (check, ping) = ping_cache.check(now, node, &mut pingf); if let Some(ping) = ping { @@ -2102,7 +2001,7 @@ impl ClusterInfo { .pull_request_ping_pong_check_failed_count .add_relaxed(1) } - check || !check_enabled + check }; // Because pull-responses are sent back to packet.meta.addr() of // incoming pull-requests, pings are also sent to request.from_addr (as @@ -2122,8 +2021,9 @@ impl ClusterInfo { recycler: &PacketsRecycler, requests: Vec, stakes: &HashMap, - feature_set: Option<&FeatureSet>, + require_stake_for_gossip: bool, ) -> Packets { + const DEFAULT_EPOCH_DURATION_MS: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_MS_PER_SLOT; let mut time = Measure::start("handle_pull_requests"); let callers = crds_value::filter_current(requests.iter().map(|r| &r.caller)); self.time_gossip_write_lock("process_pull_reqs", &self.stats.process_pull_requests) @@ -2134,7 +2034,7 @@ impl ClusterInfo { let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = { let mut rng = rand::thread_rng(); let check_pull_request = - self.check_pull_request(Instant::now(), &mut rng, &mut packets, feature_set); + self.check_pull_request(Instant::now(), &mut rng, &mut packets); requests .into_iter() .filter(check_pull_request) @@ -2144,70 +2044,59 @@ impl ClusterInfo { let now = timestamp(); let self_id = self.id(); - let pull_responses = self + let mut pull_responses = self .time_gossip_read_lock( 
"generate_pull_responses", &self.stats.generate_pull_responses, ) .generate_pull_responses(&caller_and_filters, output_size_limit, now); - - let pull_responses: Vec<_> = pull_responses - .into_iter() - .zip(addrs.into_iter()) - .filter(|(response, _)| !response.is_empty()) - .collect(); - - if pull_responses.is_empty() { - return packets; + if require_stake_for_gossip { + for resp in &mut pull_responses { + retain_staked(resp, stakes); + } } - - let mut stats: Vec<_> = pull_responses + let (responses, scores): (Vec<_>, Vec<_>) = addrs .iter() - .enumerate() - .map(|(i, (responses, _from_addr))| { - let score: u64 = if stakes.get(&responses[0].pubkey()).is_some() { - 2 + .zip(pull_responses) + .flat_map(|(addr, responses)| repeat(addr).zip(responses)) + .map(|(addr, response)| { + let age = now.saturating_sub(response.wallclock()); + let score = DEFAULT_EPOCH_DURATION_MS + .saturating_sub(age) + .div(CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS) + .max(1); + let score = if stakes.contains_key(&response.pubkey()) { + 2 * score } else { - 1 + score }; - responses - .iter() - .enumerate() - .map(|(j, _response)| ResponseScore { - to: i, - responses_index: j, - score, - }) - .collect::>() + let score = match response.data { + CrdsData::ContactInfo(_) => 2 * score, + _ => score, + }; + ((addr, response), score) }) - .flatten() - .collect(); - - stats.sort_by(|a, b| a.score.cmp(&b.score)); - let weights: Vec<_> = stats.iter().map(|stat| stat.score).collect(); - - let seed = [48u8; 32]; - let rng = &mut ChaChaRng::from_seed(seed); - let weighted_index = WeightedIndex::new(weights).unwrap(); - + .unzip(); + if responses.is_empty() { + return packets; + } + let shuffle = { + let mut seed = [0; 32]; + rand::thread_rng().fill(&mut seed[..]); + weighted_shuffle(&scores, seed).into_iter() + }; let mut total_bytes = 0; - let mut sent = HashSet::new(); - while sent.len() < stats.len() { - let index = weighted_index.sample(rng); - if sent.contains(&index) { - continue; - } - let stat = 
&stats[index]; - let from_addr = pull_responses[stat.to].1; - let response = pull_responses[stat.to].0[stat.responses_index].clone(); - let protocol = Protocol::PullResponse(self_id, vec![response]); - match Packet::from_data(Some(&from_addr), protocol) { + let mut sent = 0; + for (addr, response) in shuffle.map(|i| &responses[i]) { + let response = vec![response.clone()]; + let response = Protocol::PullResponse(self_id, response); + match Packet::from_data(Some(addr), response) { Err(err) => error!("failed to write pull-response packet: {:?}", err), Ok(packet) => { if self.outbound_budget.take(packet.meta.size) { - sent.insert(index); total_bytes += packet.meta.size; - packets.packets.push(packet) + packets.packets.push(packet); + sent += 1; } else { inc_new_counter_info!("gossip_pull_request-no_budget", 1); break; @@ -2216,16 +2105,14 @@ impl ClusterInfo { } } time.stop(); - inc_new_counter_info!("gossip_pull_request-sent_requests", sent.len()); - inc_new_counter_info!( - "gossip_pull_request-dropped_requests", - stats.len() - sent.len() - ); + let dropped_responses = responses.len() - sent; + inc_new_counter_info!("gossip_pull_request-sent_requests", sent); + inc_new_counter_info!("gossip_pull_request-dropped_requests", dropped_responses); debug!( "handle_pull_requests: {} sent: {} total: {} total_bytes: {}", time, - sent.len(), - stats.len(), + sent, + responses.len(), total_bytes ); packets @@ -2236,7 +2123,7 @@ impl ClusterInfo { responses: Vec<(Pubkey, Vec)>, thread_pool: &ThreadPool, stakes: &HashMap, - epoch_time_ms: u64, + epoch_duration: Duration, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_pull_responses_time); if responses.is_empty() { @@ -2285,11 +2172,10 @@ impl ClusterInfo { .reduce(HashMap::new, merge) }); if !responses.is_empty() { - let timeouts = self - .gossip - .read() - .unwrap() - .make_timeouts(&stakes, epoch_time_ms); + let timeouts = { + let gossip = self.gossip.read().unwrap(); + gossip.make_timeouts(&stakes, epoch_duration) 
+ }; for (from, data) in responses { self.handle_pull_response(&from, data, &timeouts); } @@ -2427,7 +2313,7 @@ impl ClusterInfo { let _st = ScopedTimer::from(&self.stats.handle_batch_pong_messages_time); let mut pongs = pongs.into_iter().peekable(); if pongs.peek().is_some() { - let mut ping_cache = self.ping_cache.write().unwrap(); + let mut ping_cache = self.ping_cache.lock().unwrap(); for (addr, pong) in pongs { ping_cache.add(&pong, addr, now); } @@ -2441,6 +2327,7 @@ impl ClusterInfo { recycler: &PacketsRecycler, stakes: &HashMap, response_sender: &PacketSender, + require_stake_for_gossip: bool, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_push_messages_time); if messages.is_empty() { @@ -2487,8 +2374,8 @@ impl ClusterInfo { self.stats .skip_push_message_shred_version .add_relaxed(num_crds_values - num_filtered_crds_values); - // Update crds values and obtain updated keys. - let updated_labels: Vec<_> = { + // Origins' pubkeys of upserted crds values. + let origins: HashSet<_> = { let mut gossip = self.time_gossip_write_lock("process_push", &self.stats.process_push_message); let now = timestamp(); @@ -2497,17 +2384,16 @@ impl ClusterInfo { .flat_map(|(from, crds_values)| { gossip.process_push_message(&from, crds_values, now) }) - .map(|v| v.value.label()) .collect() }; // Generate prune messages. 
let prunes = self .time_gossip_write_lock("prune_received_cache", &self.stats.prune_received_cache) - .prune_received_cache(updated_labels, stakes); + .prune_received_cache(origins, stakes); let prunes: Vec<(Pubkey /*from*/, Vec /*origins*/)> = prunes .into_iter() .flat_map(|(from, prunes)| { - std::iter::repeat(from).zip( + repeat(from).zip( prunes .into_iter() .chunks(MAX_PRUNE_DATA_NODES) @@ -2550,7 +2436,7 @@ impl ClusterInfo { self.stats .push_response_count .add_relaxed(packets.packets.len() as u64); - let new_push_requests = self.new_push_requests(); + let new_push_requests = self.new_push_requests(stakes, require_stake_for_gossip); inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len()); for (address, request) in new_push_requests { if ContactInfo::is_valid_address(&address) { @@ -2571,24 +2457,29 @@ impl ClusterInfo { let _ = response_sender.send(packets); } - fn get_stakes_and_epoch_time( - bank_forks: Option<&Arc>>, - ) -> ( - HashMap, // staked nodes - u64, // epoch time ms - ) { - match bank_forks { - Some(ref bank_forks) => { - let bank = bank_forks.read().unwrap().root_bank(); - let epoch = bank.epoch(); - ( - bank.staked_nodes(), - bank.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT, - ) - } + fn require_stake_for_gossip( + &self, + feature_set: Option<&FeatureSet>, + stakes: &HashMap, + ) -> bool { + match feature_set { None => { - inc_new_counter_info!("cluster_info-purge-no_working_bank", 1); - (HashMap::new(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS) + self.stats + .require_stake_for_gossip_unknown_feature_set + .add_relaxed(1); + false + } + Some(feature_set) => { + if !feature_set.is_active(&feature_set::require_stake_for_gossip::id()) { + false + } else if stakes.len() < MIN_NUM_STAKED_NODES { + self.stats + .require_stake_for_gossip_unknown_stakes + .add_relaxed(1); + false + } else { + true + } } } } @@ -2599,9 +2490,9 @@ impl ClusterInfo { thread_pool: &ThreadPool, recycler: &PacketsRecycler, response_sender: 
&PacketSender, - stakes: HashMap, + stakes: &HashMap, feature_set: Option<&FeatureSet>, - epoch_time_ms: u64, + epoch_duration: Duration, should_check_duplicate_instance: bool, ) -> Result<()> { let _st = ScopedTimer::from(&self.stats.process_gossip_packets_time); @@ -2672,25 +2563,37 @@ impl ClusterInfo { self.stats .packets_received_prune_messages_count .add_relaxed(prune_messages.len() as u64); + let require_stake_for_gossip = self.require_stake_for_gossip(feature_set, stakes); + if require_stake_for_gossip { + for (_, data) in &mut pull_responses { + retain_staked(data, stakes); + } + for (_, data) in &mut push_messages { + retain_staked(data, stakes); + } + pull_responses.retain(|(_, data)| !data.is_empty()); + push_messages.retain(|(_, data)| !data.is_empty()); + } self.handle_batch_ping_messages(ping_messages, recycler, response_sender); self.handle_batch_prune_messages(prune_messages); self.handle_batch_push_messages( push_messages, thread_pool, recycler, - &stakes, + stakes, response_sender, + require_stake_for_gossip, ); - self.handle_batch_pull_responses(pull_responses, thread_pool, &stakes, epoch_time_ms); - self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes); + self.handle_batch_pull_responses(pull_responses, thread_pool, stakes, epoch_duration); + self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes); self.handle_batch_pong_messages(pong_messages, Instant::now()); self.handle_batch_pull_requests( pull_requests, thread_pool, recycler, - &stakes, + stakes, response_sender, - feature_set, + require_stake_for_gossip, ); Ok(()) } @@ -2699,7 +2602,7 @@ impl ClusterInfo { fn run_listen( &self, recycler: &PacketsRecycler, - bank_forks: Option<&Arc>>, + bank_forks: Option<&RwLock>, requests_receiver: &PacketReceiver, response_sender: &PacketSender, thread_pool: &ThreadPool, @@ -2707,6 +2610,7 @@ impl ClusterInfo { should_check_duplicate_instance: bool, ) -> Result<()> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); + const 
SUBMIT_GOSSIP_STATS_INTERVAL: Duration = Duration::from_secs(2); let packets: Vec<_> = requests_receiver.recv_timeout(RECV_TIMEOUT)?.packets.into(); let mut packets = VecDeque::from(packets); while let Ok(packet) = requests_receiver.try_recv() { @@ -2719,330 +2623,32 @@ impl ClusterInfo { .add_relaxed(excess_count as u64); } } - let (stakes, epoch_time_ms) = Self::get_stakes_and_epoch_time(bank_forks); // Using root_bank instead of working_bank here so that an enbaled // feature does not roll back (if the feature happens to get enabled in // a minority fork). - let feature_set = bank_forks.map(|bank_forks| { - bank_forks - .read() - .unwrap() - .root_bank() - .deref() - .feature_set - .clone() - }); + let (feature_set, stakes) = match bank_forks { + None => (None, HashMap::default()), + Some(bank_forks) => { + let bank = bank_forks.read().unwrap().root_bank(); + let feature_set = bank.feature_set.clone(); + (Some(feature_set), bank.staked_nodes()) + } + }; self.process_packets( packets, thread_pool, recycler, response_sender, - stakes, + &stakes, feature_set.as_deref(), - epoch_time_ms, + get_epoch_duration(bank_forks), should_check_duplicate_instance, )?; - - self.print_reset_stats(last_print); - - Ok(()) - } - - fn print_reset_stats(&self, last_print: &mut Instant) { - if last_print.elapsed().as_millis() > 2000 { - let (table_size, purged_values_size, failed_inserts_size) = { - let r_gossip = self.gossip.read().unwrap(); - ( - r_gossip.crds.len(), - r_gossip.pull.purged_values.len(), - r_gossip.pull.failed_inserts.len(), - ) - }; - datapoint_info!( - "cluster_info_stats", - ("entrypoint", self.stats.entrypoint.clear(), i64), - ("entrypoint2", self.stats.entrypoint2.clear(), i64), - ("push_vote_read", self.stats.push_vote_read.clear(), i64), - ("get_votes", self.stats.get_votes.clear(), i64), - ( - "get_accounts_hash", - self.stats.get_accounts_hash.clear(), - i64 - ), - ("all_tvu_peers", self.stats.all_tvu_peers.clear(), i64), - ("tvu_peers", 
self.stats.tvu_peers.clear(), i64), - ( - "new_push_requests_num", - self.stats.new_push_requests_num.clear(), - i64 - ), - ("table_size", table_size as i64, i64), - ("purged_values_size", purged_values_size as i64, i64), - ("failed_inserts_size", failed_inserts_size as i64, i64), - ); - datapoint_info!( - "cluster_info_stats2", - ( - "gossip_packets_dropped_count", - self.stats.gossip_packets_dropped_count.clear(), - i64 - ), - ("repair_peers", self.stats.repair_peers.clear(), i64), - ( - "new_push_requests", - self.stats.new_push_requests.clear(), - i64 - ), - ( - "new_push_requests2", - self.stats.new_push_requests2.clear(), - i64 - ), - ("purge", self.stats.purge.clear(), i64), - ( - "process_gossip_packets_time", - self.stats.process_gossip_packets_time.clear(), - i64 - ), - ( - "handle_batch_ping_messages_time", - self.stats.handle_batch_ping_messages_time.clear(), - i64 - ), - ( - "handle_batch_pong_messages_time", - self.stats.handle_batch_pong_messages_time.clear(), - i64 - ), - ( - "handle_batch_prune_messages_time", - self.stats.handle_batch_prune_messages_time.clear(), - i64 - ), - ( - "handle_batch_pull_requests_time", - self.stats.handle_batch_pull_requests_time.clear(), - i64 - ), - ( - "handle_batch_pull_responses_time", - self.stats.handle_batch_pull_responses_time.clear(), - i64 - ), - ( - "handle_batch_push_messages_time", - self.stats.handle_batch_push_messages_time.clear(), - i64 - ), - ( - "process_pull_resp", - self.stats.process_pull_response.clear(), - i64 - ), - ( - "filter_pull_resp", - self.stats.filter_pull_response.clear(), - i64 - ), - ( - "process_pull_resp_count", - self.stats.process_pull_response_count.clear(), - i64 - ), - ( - "pull_response_fail_insert", - self.stats.process_pull_response_fail_insert.clear(), - i64 - ), - ( - "pull_response_fail_timeout", - self.stats.process_pull_response_fail_timeout.clear(), - i64 - ), - ( - "pull_response_success", - self.stats.process_pull_response_success.clear(), - i64 - ), - ( - 
"process_pull_resp_timeout", - self.stats.process_pull_response_timeout.clear(), - i64 - ), - ( - "push_response_count", - self.stats.push_response_count.clear(), - i64 - ), - ); - datapoint_info!( - "cluster_info_stats3", - ( - "process_pull_resp_len", - self.stats.process_pull_response_len.clear(), - i64 - ), - ( - "process_pull_requests", - self.stats.process_pull_requests.clear(), - i64 - ), - ( - "pull_request_ping_pong_check_failed_count", - self.stats.pull_request_ping_pong_check_failed_count.clear(), - i64 - ), - ( - "generate_pull_responses", - self.stats.generate_pull_responses.clear(), - i64 - ), - ("process_prune", self.stats.process_prune.clear(), i64), - ( - "process_push_message", - self.stats.process_push_message.clear(), - i64 - ), - ( - "prune_received_cache", - self.stats.prune_received_cache.clear(), - i64 - ), - ( - "epoch_slots_lookup", - self.stats.epoch_slots_lookup.clear(), - i64 - ), - ( - "new_pull_requests", - self.stats.new_pull_requests.clear(), - i64 - ), - ( - "mark_pull_request", - self.stats.mark_pull_request.clear(), - i64 - ), - ); - datapoint_info!( - "cluster_info_stats4", - ( - "skip_push_message_shred_version", - self.stats.skip_push_message_shred_version.clear(), - i64 - ), - ( - "skip_pull_response_shred_version", - self.stats.skip_pull_response_shred_version.clear(), - i64 - ), - ( - "skip_pull_shred_version", - self.stats.skip_pull_shred_version.clear(), - i64 - ), - ( - "push_message_count", - self.stats.push_message_count.clear(), - i64 - ), - ( - "push_message_value_count", - self.stats.push_message_value_count.clear(), - i64 - ), - ( - "new_pull_requests_count", - self.stats.new_pull_requests_count.clear(), - i64 - ), - ( - "prune_message_count", - self.stats.prune_message_count.clear(), - i64 - ), - ( - "prune_message_len", - self.stats.prune_message_len.clear(), - i64 - ), - ); - datapoint_info!( - "cluster_info_stats5", - ( - "pull_requests_count", - self.stats.pull_requests_count.clear(), - i64 - ), - ( - 
"packets_received_count", - self.stats.packets_received_count.clear(), - i64 - ), - ( - "packets_received_prune_messages_count", - self.stats.packets_received_prune_messages_count.clear(), - i64 - ), - ( - "packets_received_pull_requests_count", - self.stats.packets_received_pull_requests_count.clear(), - i64 - ), - ( - "packets_received_pull_responses_count", - self.stats.packets_received_pull_responses_count.clear(), - i64 - ), - ( - "packets_received_push_messages_count", - self.stats.packets_received_push_messages_count.clear(), - i64 - ), - ( - "packets_received_verified_count", - self.stats.packets_received_verified_count.clear(), - i64 - ), - ( - "packets_sent_gossip_requests_count", - self.stats.packets_sent_gossip_requests_count.clear(), - i64 - ), - ( - "packets_sent_prune_messages_count", - self.stats.packets_sent_prune_messages_count.clear(), - i64 - ), - ( - "packets_sent_pull_requests_count", - self.stats.packets_sent_pull_requests_count.clear(), - i64 - ), - ( - "packets_sent_pull_responses_count", - self.stats.packets_sent_pull_responses_count.clear(), - i64 - ), - ( - "packets_sent_push_messages_count", - self.stats.packets_sent_push_messages_count.clear(), - i64 - ), - ( - "trim_crds_table_failed", - self.stats.trim_crds_table_failed.clear(), - i64 - ), - ( - "trim_crds_table_purged_values_count", - self.stats.trim_crds_table_purged_values_count.clear(), - i64 - ), - ); - + if last_print.elapsed() > SUBMIT_GOSSIP_STATS_INTERVAL { + submit_gossip_stats(&self.stats, &self.gossip, &stakes); *last_print = Instant::now(); } + Ok(()) } pub fn listen( @@ -3068,7 +2674,7 @@ impl ClusterInfo { while !exit.load(Ordering::Relaxed) { if let Err(err) = self.run_listen( &recycler, - bank_forks.as_ref(), + bank_forks.as_deref(), &requests_receiver, &response_sender, &thread_pool, @@ -3141,6 +2747,23 @@ impl ClusterInfo { } } +// Returns root bank's epoch duration. Falls back on +// DEFAULT_SLOTS_PER_EPOCH * DEFAULT_MS_PER_SLOT +// if there are no working banks. 
+fn get_epoch_duration(bank_forks: Option<&RwLock>) -> Duration { + let num_slots = match bank_forks { + None => { + inc_new_counter_info!("cluster_info-purge-no_working_bank", 1); + DEFAULT_SLOTS_PER_EPOCH + } + Some(bank_forks) => { + let bank = bank_forks.read().unwrap().root_bank(); + bank.get_slots_in_epoch(bank.epoch()) + } + }; + Duration::from_millis(num_slots * DEFAULT_MS_PER_SLOT) +} + /// Turbine logic /// 1 - For the current node find out if it is in layer 1 /// 1.1 - If yes, then broadcast to all layer 1 nodes @@ -3251,6 +2874,7 @@ impl Node { }, } } + fn get_gossip_port( gossip_addr: &SocketAddr, port_range: PortRange, @@ -3271,6 +2895,60 @@ impl Node { bind_in_range(bind_ip_addr, port_range).expect("Failed to bind") } + pub fn new_single_bind( + pubkey: &Pubkey, + gossip_addr: &SocketAddr, + port_range: PortRange, + bind_ip_addr: IpAddr, + ) -> Self { + let (gossip_port, (gossip, ip_echo)) = + Self::get_gossip_port(gossip_addr, port_range, bind_ip_addr); + let (tvu_port, tvu) = Self::bind(bind_ip_addr, port_range); + let (tvu_forwards_port, tvu_forwards) = Self::bind(bind_ip_addr, port_range); + let (tpu_port, tpu) = Self::bind(bind_ip_addr, port_range); + let (tpu_forwards_port, tpu_forwards) = Self::bind(bind_ip_addr, port_range); + let (_, retransmit_socket) = Self::bind(bind_ip_addr, port_range); + let (repair_port, repair) = Self::bind(bind_ip_addr, port_range); + let (serve_repair_port, serve_repair) = Self::bind(bind_ip_addr, port_range); + let (_, broadcast) = Self::bind(bind_ip_addr, port_range); + + let rpc_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); + let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); + + let info = ContactInfo { + id: *pubkey, + gossip: SocketAddr::new(gossip_addr.ip(), gossip_port), + tvu: SocketAddr::new(gossip_addr.ip(), tvu_port), + tvu_forwards: SocketAddr::new(gossip_addr.ip(), tvu_forwards_port), + repair: SocketAddr::new(gossip_addr.ip(), 
repair_port), + tpu: SocketAddr::new(gossip_addr.ip(), tpu_port), + tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port), + unused: socketaddr_any!(), + rpc: SocketAddr::new(gossip_addr.ip(), rpc_port), + rpc_pubsub: SocketAddr::new(gossip_addr.ip(), rpc_pubsub_port), + serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port), + wallclock: timestamp(), + shred_version: 0, + }; + trace!("new ContactInfo: {:?}", info); + + Node { + info, + sockets: Sockets { + gossip, + ip_echo: Some(ip_echo), + tvu: vec![tvu], + tvu_forwards: vec![tvu_forwards], + tpu: vec![tpu], + tpu_forwards: vec![tpu_forwards], + broadcast: vec![broadcast], + repair, + retransmit_sockets: vec![retransmit_socket], + serve_repair, + }, + } + } + pub fn new_with_external_ip( pubkey: &Pubkey, gossip_addr: &SocketAddr, @@ -3348,12 +3026,13 @@ pub fn stake_weight_peers( mod tests { use super::*; use crate::{ + crds_gossip_pull::tests::MIN_NUM_BLOOM_FILTERS, crds_value::{CrdsValue, CrdsValueLabel, Vote as CrdsVote}, duplicate_shred::{self, tests::new_rand_shred, MAX_DUPLICATE_SHREDS}, }; use itertools::izip; - use rand::seq::SliceRandom; - use serial_test::serial; + use rand::{seq::SliceRandom, SeedableRng}; + use rand_chacha::ChaChaRng; use solana_ledger::shred::Shredder; use solana_sdk::signature::{Keypair, Signer}; use solana_vote_program::{vote_instruction, vote_state::Vote}; @@ -3441,7 +3120,7 @@ mod tests { .take(128) .collect(); let pings: Vec<_> = { - let mut ping_cache = cluster_info.ping_cache.write().unwrap(); + let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); let mut pingf = || Ping::new_rand(&mut rng, &this_node).ok(); remote_nodes .iter() @@ -3464,7 +3143,7 @@ mod tests { cluster_info.handle_batch_pong_messages(pongs, now); // Assert that remote nodes now pass the ping/pong check. 
{ - let mut ping_cache = cluster_info.ping_cache.write().unwrap(); + let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); let (check, _) = ping_cache.check(now, node, || -> Option { None }); @@ -3473,7 +3152,7 @@ mod tests { } // Assert that a new random remote node still will not pass the check. { - let mut ping_cache = cluster_info.ping_cache.write().unwrap(); + let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); let (keypair, socket) = new_rand_remote_node(&mut rng); let node = (keypair.pubkey(), socket); let (check, _) = ping_cache.check(now, node, || -> Option { None }); @@ -3646,17 +3325,9 @@ mod tests { let mut rng = rand::thread_rng(); let leader = Arc::new(Keypair::new()); let keypair = Keypair::new(); - let (slot, parent_slot, fec_rate, reference_tick, version) = - (53084024, 53084023, 0.0, 0, 0); - let shredder = Shredder::new( - slot, - parent_slot, - fec_rate, - leader.clone(), - reference_tick, - version, - ) - .unwrap(); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = + Shredder::new(slot, parent_slot, leader.clone(), reference_tick, version).unwrap(); let next_shred_index = rng.gen(); let shred = new_rand_shred(&mut rng, next_shred_index, &shredder); let other_payload = new_rand_shred(&mut rng, next_shred_index, &shredder).payload; @@ -3716,8 +3387,13 @@ mod tests { .write() .unwrap() .refresh_push_active_set(&HashMap::new(), None); - let reqs = - cluster_info.generate_new_gossip_requests(&thread_pool, None, &HashMap::new(), true); + let reqs = cluster_info.generate_new_gossip_requests( + &thread_pool, + None, // gossip_validators + &HashMap::new(), + true, // generate_pull_requests + false, // require_stake_for_gossip + ); //assert none of the addrs are invalid. 
reqs.iter().all(|(addr, _)| { let res = ContactInfo::is_valid_address(addr); @@ -3740,13 +3416,8 @@ mod tests { let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp()); let label = CrdsValueLabel::ContactInfo(d.id); cluster_info.insert_info(d); - assert!(cluster_info - .gossip - .read() - .unwrap() - .crds - .lookup(&label) - .is_some()); + let gossip = cluster_info.gossip.read().unwrap(); + assert!(gossip.crds.get(&label).is_some()); } fn assert_in_range(x: u16, range: (u16, u16)) { @@ -3821,6 +3492,11 @@ mod tests { let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0); let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0); let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair)); + cluster_info + .ping_cache + .lock() + .unwrap() + .mock_pong(peer.id, peer.gossip, Instant::now()); cluster_info.insert_info(peer); cluster_info .gossip @@ -3828,7 +3504,7 @@ mod tests { .unwrap() .refresh_push_active_set(&HashMap::new(), None); //check that all types of gossip messages are signed correctly - let (_, push_messages) = cluster_info + let push_messages = cluster_info .gossip .write() .unwrap() @@ -3839,20 +3515,114 @@ mod tests { .values() .for_each(|v| v.par_iter().for_each(|v| assert!(v.verify()))); - let (_, _, val) = cluster_info + let mut pings = Vec::new(); + cluster_info .gossip .write() .unwrap() .new_pull_request( &thread_pool, + cluster_info.keypair.deref(), timestamp(), None, &HashMap::new(), MAX_BLOOM_SIZE, + &cluster_info.ping_cache, + &mut pings, ) .ok() .unwrap(); - assert!(val.verify()); + } + + #[test] + fn test_refresh_vote() { + let keys = Keypair::new(); + let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); + let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + + // Construct and push a vote for some other slot + let unrefresh_slot = 5; + let unrefresh_tower = vec![1, 3, unrefresh_slot]; + let unrefresh_vote = Vote::new(unrefresh_tower.clone(), 
Hash::new_unique()); + let unrefresh_ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + unrefresh_vote, + ); + let unrefresh_tx = Transaction::new_with_payer( + &[unrefresh_ix], // instructions + None, // payer + ); + cluster_info.push_vote(&unrefresh_tower, unrefresh_tx.clone()); + cluster_info.flush_push_queue(); + let mut cursor = Cursor::default(); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes, vec![unrefresh_tx.clone()]); + + // Now construct vote for the slot to be refreshed later + let refresh_slot = 7; + let refresh_tower = vec![1, 3, unrefresh_slot, refresh_slot]; + let refresh_vote = Vote::new(refresh_tower.clone(), Hash::new_unique()); + let refresh_ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + refresh_vote.clone(), + ); + let refresh_tx = Transaction::new_with_payer( + &[refresh_ix], // instructions + None, // payer + ); + + // Trying to refresh vote when it doesn't yet exist in gossip + // shouldn't add the vote + cluster_info.refresh_vote(refresh_tx.clone(), refresh_slot); + cluster_info.flush_push_queue(); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes, vec![]); + let (_, votes) = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(votes.len(), 1); + assert!(votes.contains(&unrefresh_tx)); + + // Push the new vote for `refresh_slot` + cluster_info.push_vote(&refresh_tower, refresh_tx.clone()); + cluster_info.flush_push_queue(); + + // Should be two votes in gossip + let (_, votes) = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(votes.len(), 2); + assert!(votes.contains(&unrefresh_tx)); + assert!(votes.contains(&refresh_tx)); + + // Refresh a few times, we should only have the latest update + let mut latest_refresh_tx = refresh_tx; + for _ in 0..10 { + let latest_refreshed_recent_blockhash = Hash::new_unique(); + let 
new_signer = Keypair::new(); + let refresh_ix = vote_instruction::vote( + &new_signer.pubkey(), // vote_pubkey + &new_signer.pubkey(), // authorized_voter_pubkey + refresh_vote.clone(), + ); + latest_refresh_tx = Transaction::new_signed_with_payer( + &[refresh_ix], + None, + &[&new_signer], + latest_refreshed_recent_blockhash, + ); + cluster_info.refresh_vote(latest_refresh_tx.clone(), refresh_slot); + } + cluster_info.flush_push_queue(); + + // The diff since `max_ts` should only be the latest refreshed vote + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes.len(), 1); + assert_eq!(votes[0], latest_refresh_tx); + + // Should still be two votes in gossip + let (_, votes) = cluster_info.get_votes(&mut Cursor::default()); + assert_eq!(votes.len(), 2); + assert!(votes.contains(&unrefresh_tx)); + assert!(votes.contains(&latest_refresh_tx)); } #[test] @@ -3863,10 +3633,9 @@ mod tests { let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); // make sure empty crds is handled correctly - let now = timestamp(); - let (_, votes, max_ts) = cluster_info.get_votes(now); + let mut cursor = Cursor::default(); + let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![]); - assert_eq!(max_ts, now); // add a vote let vote = Vote::new( @@ -3886,8 +3655,7 @@ mod tests { cluster_info.push_vote(&tower, tx.clone()); cluster_info.flush_push_queue(); - // -1 to make sure that the clock is strictly lower then when insert occurred - let (labels, votes, max_ts) = cluster_info.get_votes(now - 1); + let (labels, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![tx]); assert_eq!(labels.len(), 1); match labels[0] { @@ -3897,12 +3665,83 @@ mod tests { _ => panic!("Bad match"), } - assert!(max_ts >= now - 1); - // make sure timestamp filter works - let (_, votes, new_max_ts) = cluster_info.get_votes(max_ts); + let (_, votes) = cluster_info.get_votes(&mut cursor); assert_eq!(votes, vec![]); - assert_eq!(max_ts, 
new_max_ts); + } + + fn new_vote_transaction(rng: &mut R, slots: Vec) -> Transaction { + let vote = Vote::new(slots, solana_sdk::hash::new_rand(rng)); + let ix = vote_instruction::vote( + &Pubkey::new_unique(), // vote_pubkey + &Pubkey::new_unique(), // authorized_voter_pubkey + vote, + ); + Transaction::new_with_payer( + &[ix], // instructions + None, // payer + ) + } + + #[test] + fn test_push_votes_with_tower() { + let get_vote_slots = |cluster_info: &ClusterInfo| -> Vec { + let (labels, _) = cluster_info.get_votes(&mut Cursor::default()); + let gossip = cluster_info.gossip.read().unwrap(); + let mut vote_slots = HashSet::new(); + for label in labels { + match &gossip.crds.get(&label).unwrap().value.data { + CrdsData::Vote(_, vote) => { + assert!(vote_slots.insert(vote.slot().unwrap())); + } + _ => panic!("this should not happen!"), + } + } + vote_slots.into_iter().collect() + }; + let mut rng = rand::thread_rng(); + let keys = Keypair::new(); + let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); + let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); + let mut tower = Vec::new(); + for k in 0..MAX_LOCKOUT_HISTORY { + let slot = k as Slot; + tower.push(slot); + let vote = new_vote_transaction(&mut rng, vec![slot]); + cluster_info.push_vote(&tower, vote); + } + let vote_slots = get_vote_slots(&cluster_info); + assert_eq!(vote_slots.len(), MAX_LOCKOUT_HISTORY); + for vote_slot in vote_slots { + assert!(vote_slot < MAX_LOCKOUT_HISTORY as u64); + } + // Push a new vote evicting one. + let slot = MAX_LOCKOUT_HISTORY as Slot; + tower.push(slot); + tower.remove(23); + let vote = new_vote_transaction(&mut rng, vec![slot]); + cluster_info.push_vote(&tower, vote); + let vote_slots = get_vote_slots(&cluster_info); + assert_eq!(vote_slots.len(), MAX_LOCKOUT_HISTORY); + for vote_slot in vote_slots { + assert!(vote_slot <= slot); + assert!(vote_slot != 23); + } + // Push a new vote evicting two. 
+ // Older one should be evicted from the crds table. + let slot = slot + 1; + tower.push(slot); + tower.remove(17); + tower.remove(5); + let vote = new_vote_transaction(&mut rng, vec![slot]); + cluster_info.push_vote(&tower, vote); + let vote_slots = get_vote_slots(&cluster_info); + assert_eq!(vote_slots.len(), MAX_LOCKOUT_HISTORY); + for vote_slot in vote_slots { + assert!(vote_slot <= slot); + assert!(vote_slot != 23); + assert!(vote_slot != 5); + } } fn new_vote_transaction(rng: &mut R, slots: Vec) -> Transaction { @@ -3985,23 +3824,17 @@ mod tests { let keys = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0); let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info); - let (slots, since) = cluster_info.get_epoch_slots_since(0); + let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); assert!(slots.is_empty()); - assert!(since.is_none()); cluster_info.push_epoch_slots(&[0]); cluster_info.flush_push_queue(); - let (slots, since) = cluster_info.get_epoch_slots_since(std::u64::MAX); - assert!(slots.is_empty()); - assert_eq!(since, None); - - let (slots, since) = cluster_info.get_epoch_slots_since(0); + let mut cursor = Cursor::default(); + let slots = cluster_info.get_epoch_slots(&mut cursor); assert_eq!(slots.len(), 1); - assert!(since.is_some()); - let (slots, since2) = cluster_info.get_epoch_slots_since(since.unwrap() + 1); + let slots = cluster_info.get_epoch_slots(&mut cursor); assert!(slots.is_empty()); - assert_eq!(since2, None); } #[test] @@ -4015,35 +3848,39 @@ mod tests { let entrypoint_pubkey = solana_sdk::pubkey::new_rand(); let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp()); cluster_info.set_entrypoint(entrypoint.clone()); - let pulls = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); - assert_eq!(1, pulls.len() as u64); - match pulls.get(0) { - Some((addr, msg)) => { - assert_eq!(*addr, entrypoint.gossip); - match msg { - Protocol::PullRequest(_, 
value) => { - assert!(value.verify()); - assert_eq!(value.pubkey(), cluster_info.id()) - } - _ => panic!("wrong protocol"), + let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); + assert!(pings.is_empty()); + assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); + for (addr, msg) in pulls { + assert_eq!(addr, entrypoint.gossip); + match msg { + Protocol::PullRequest(_, value) => { + assert!(value.verify()); + assert_eq!(value.pubkey(), cluster_info.id()) } + _ => panic!("wrong protocol"), } - None => panic!("entrypoint should be a pull destination"), } - // now add this message back to the table and make sure after the next pull, the entrypoint is unset let entrypoint_crdsvalue = CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone())); let cluster_info = Arc::new(cluster_info); - let timeouts = cluster_info.gossip.read().unwrap().make_timeouts_test(); + let timeouts = { + let gossip = cluster_info.gossip.read().unwrap(); + gossip.make_timeouts( + &HashMap::default(), // stakes, + Duration::from_millis(gossip.pull.crds_timeout), + ) + }; ClusterInfo::handle_pull_response( &cluster_info, &entrypoint_pubkey, vec![entrypoint_crdsvalue], &timeouts, ); - let pulls = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); - assert_eq!(1, pulls.len() as u64); + let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &HashMap::new()); + assert_eq!(pings.len(), 1); + assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); assert_eq!(*cluster_info.entrypoints.read().unwrap(), vec![entrypoint]); } @@ -4066,7 +3903,7 @@ mod tests { fn test_split_gossip_messages() { const NUM_CRDS_VALUES: usize = 2048; let mut rng = rand::thread_rng(); - let values: Vec<_> = std::iter::repeat_with(|| CrdsValue::new_rand(&mut rng, None)) + let values: Vec<_> = repeat_with(|| CrdsValue::new_rand(&mut rng, None)) .take(NUM_CRDS_VALUES) .collect(); let splits: Vec<_> = @@ -4218,28 +4055,42 @@ mod tests { let other_node_pubkey = 
solana_sdk::pubkey::new_rand(); let other_node = ContactInfo::new_localhost(&other_node_pubkey, timestamp()); assert_ne!(other_node.gossip, entrypoint.gossip); + cluster_info.ping_cache.lock().unwrap().mock_pong( + other_node.id, + other_node.gossip, + Instant::now(), + ); cluster_info.insert_info(other_node.clone()); stakes.insert(other_node_pubkey, 10); // Pull request 1: `other_node` is present but `entrypoint` was just added (so it has a // fresh timestamp). There should only be one pull request to `other_node` - let pulls = cluster_info.new_pull_requests(&thread_pool, None, &stakes); - assert_eq!(1, pulls.len() as u64); - assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); + let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes); + assert!(pings.is_empty()); + assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); + assert!(pulls.into_iter().all(|(addr, _)| addr == other_node.gossip)); // Pull request 2: pretend it's been a while since we've pulled from `entrypoint`. There should // now be two pull requests cluster_info.entrypoints.write().unwrap()[0].wallclock = 0; - let pulls = cluster_info.new_pull_requests(&thread_pool, None, &stakes); - assert_eq!(2, pulls.len() as u64); - assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); - assert_eq!(pulls.get(1).unwrap().0, entrypoint.gossip); + let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes); + assert!(pings.is_empty()); + assert_eq!(pulls.len(), 2 * MIN_NUM_BLOOM_FILTERS); + assert!(pulls + .iter() + .take(MIN_NUM_BLOOM_FILTERS) + .all(|(addr, _)| *addr == other_node.gossip)); + assert!(pulls + .iter() + .skip(MIN_NUM_BLOOM_FILTERS) + .all(|(addr, _)| *addr == entrypoint.gossip)); // Pull request 3: `other_node` is present and `entrypoint` was just pulled from. 
There should // only be one pull request to `other_node` - let pulls = cluster_info.new_pull_requests(&thread_pool, None, &stakes); - assert_eq!(1, pulls.len() as u64); - assert_eq!(pulls.get(0).unwrap().0, other_node.gossip); + let (pings, pulls) = cluster_info.new_pull_requests(&thread_pool, None, &stakes); + assert!(pings.is_empty()); + assert_eq!(pulls.len(), MIN_NUM_BLOOM_FILTERS); + assert!(pulls.into_iter().all(|(addr, _)| addr == other_node.gossip)); } #[test] @@ -4334,10 +4185,9 @@ mod tests { cluster_info.flush_push_queue(); cluster_info.push_epoch_slots(&range[16000..]); cluster_info.flush_push_queue(); - let (slots, since) = cluster_info.get_epoch_slots_since(0); + let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); let slots: Vec<_> = slots.iter().flat_map(|x| x.to_slots(0)).collect(); assert_eq!(slots, range); - assert!(since.is_some()); } #[test] @@ -4404,8 +4254,7 @@ mod tests { .any(|entrypoint| *entrypoint == gossiped_entrypoint1_info)); // Adopt the entrypoint's gossiped contact info and verify - let mut entrypoints_processed = false; - ClusterInfo::process_entrypoints(&cluster_info, &mut entrypoints_processed); + let entrypoints_processed = ClusterInfo::process_entrypoints(&cluster_info); assert_eq!(cluster_info.entrypoints.read().unwrap().len(), 2); assert!(cluster_info .entrypoints @@ -4433,8 +4282,7 @@ mod tests { // Adopt the entrypoint's gossiped contact info and verify error!("Adopt the entrypoint's gossiped contact info and verify"); - let mut entrypoints_processed = false; - ClusterInfo::process_entrypoints(&cluster_info, &mut entrypoints_processed); + let entrypoints_processed = ClusterInfo::process_entrypoints(&cluster_info); assert_eq!(cluster_info.entrypoints.read().unwrap().len(), 2); assert!(cluster_info .entrypoints @@ -4477,8 +4325,7 @@ mod tests { cluster_info.insert_info(gossiped_entrypoint_info.clone()); // Adopt the entrypoint's gossiped contact info and verify - let mut entrypoints_processed = false; - 
ClusterInfo::process_entrypoints(&cluster_info, &mut entrypoints_processed); + let entrypoints_processed = ClusterInfo::process_entrypoints(&cluster_info); assert_eq!(cluster_info.entrypoints.read().unwrap().len(), 1); assert_eq!( cluster_info.entrypoints.read().unwrap()[0], @@ -4627,7 +4474,7 @@ mod tests { } #[test] - #[serial] + #[ignore] // TODO: debug why this is flaky on buildkite! fn test_pull_request_time_pruning() { let node = Node::new_localhost(); let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info)); @@ -4650,7 +4497,8 @@ mod tests { }) .take(NO_ENTRIES) .collect(); - let timeouts = cluster_info.gossip.read().unwrap().make_timeouts_test(); + let mut timeouts = HashMap::new(); + timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS * 4); assert_eq!( (0, 0, NO_ENTRIES), cluster_info.handle_pull_response(&entrypoint_pubkey, data, &timeouts) @@ -4662,7 +4510,7 @@ mod tests { .gossip .write() .unwrap() - .mark_pull_request_creation_time(&peer, now); + .mark_pull_request_creation_time(peer, now); } assert_eq!( cluster_info @@ -4675,4 +4523,12 @@ mod tests { CRDS_UNIQUE_PUBKEY_CAPACITY ); } + + #[test] + fn test_get_epoch_millis_no_bank() { + assert_eq!( + get_epoch_duration(/*bank_forks=*/ None).as_millis() as u64, + DEFAULT_SLOTS_PER_EPOCH * DEFAULT_MS_PER_SLOT // 48 hours + ); + } } diff --git a/core/src/cluster_info_metrics.rs b/core/src/cluster_info_metrics.rs new file mode 100644 index 0000000000..832e335ea6 --- /dev/null +++ b/core/src/cluster_info_metrics.rs @@ -0,0 +1,404 @@ +use crate::crds_gossip::CrdsGossip; +use solana_measure::measure::Measure; +use solana_sdk::pubkey::Pubkey; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + RwLock, + }, + time::Instant, +}; + +#[derive(Default)] +pub(crate) struct Counter(AtomicU64); + +impl Counter { + pub(crate) fn add_measure(&self, x: &mut Measure) { + x.stop(); + self.0.fetch_add(x.as_us(), Ordering::Relaxed); + } + pub(crate) fn 
add_relaxed(&self, x: u64) { + self.0.fetch_add(x, Ordering::Relaxed); + } + fn clear(&self) -> u64 { + self.0.swap(0, Ordering::Relaxed) + } +} + +pub(crate) struct ScopedTimer<'a> { + clock: Instant, + metric: &'a AtomicU64, +} + +impl<'a> From<&'a Counter> for ScopedTimer<'a> { + // Output should be assigned to a *named* variable, otherwise it is + // immediately dropped. + #[must_use] + fn from(counter: &'a Counter) -> Self { + Self { + clock: Instant::now(), + metric: &counter.0, + } + } +} + +impl Drop for ScopedTimer<'_> { + fn drop(&mut self) { + let micros = self.clock.elapsed().as_micros(); + self.metric.fetch_add(micros as u64, Ordering::Relaxed); + } +} + +#[derive(Default)] +pub(crate) struct GossipStats { + pub(crate) all_tvu_peers: Counter, + pub(crate) entrypoint2: Counter, + pub(crate) entrypoint: Counter, + pub(crate) epoch_slots_lookup: Counter, + pub(crate) filter_pull_response: Counter, + pub(crate) generate_pull_responses: Counter, + pub(crate) get_accounts_hash: Counter, + pub(crate) get_votes: Counter, + pub(crate) gossip_packets_dropped_count: Counter, + pub(crate) handle_batch_ping_messages_time: Counter, + pub(crate) handle_batch_pong_messages_time: Counter, + pub(crate) handle_batch_prune_messages_time: Counter, + pub(crate) handle_batch_pull_requests_time: Counter, + pub(crate) handle_batch_pull_responses_time: Counter, + pub(crate) handle_batch_push_messages_time: Counter, + pub(crate) mark_pull_request: Counter, + pub(crate) new_pull_requests: Counter, + pub(crate) new_pull_requests_count: Counter, + pub(crate) new_pull_requests_pings_count: Counter, + pub(crate) new_push_requests2: Counter, + pub(crate) new_push_requests: Counter, + pub(crate) new_push_requests_num: Counter, + pub(crate) packets_received_count: Counter, + pub(crate) packets_received_prune_messages_count: Counter, + pub(crate) packets_received_pull_requests_count: Counter, + pub(crate) packets_received_pull_responses_count: Counter, + pub(crate) 
packets_received_push_messages_count: Counter, + pub(crate) packets_received_verified_count: Counter, + pub(crate) packets_sent_gossip_requests_count: Counter, + pub(crate) packets_sent_prune_messages_count: Counter, + pub(crate) packets_sent_pull_requests_count: Counter, + pub(crate) packets_sent_pull_responses_count: Counter, + pub(crate) packets_sent_push_messages_count: Counter, + pub(crate) process_gossip_packets_time: Counter, + pub(crate) process_prune: Counter, + pub(crate) process_pull_requests: Counter, + pub(crate) process_pull_response: Counter, + pub(crate) process_pull_response_count: Counter, + pub(crate) process_pull_response_fail_insert: Counter, + pub(crate) process_pull_response_fail_timeout: Counter, + pub(crate) process_pull_response_len: Counter, + pub(crate) process_pull_response_success: Counter, + pub(crate) process_pull_response_timeout: Counter, + pub(crate) process_push_message: Counter, + pub(crate) prune_message_count: Counter, + pub(crate) prune_message_len: Counter, + pub(crate) prune_received_cache: Counter, + pub(crate) pull_from_entrypoint_count: Counter, + pub(crate) pull_request_ping_pong_check_failed_count: Counter, + pub(crate) pull_requests_count: Counter, + pub(crate) purge: Counter, + pub(crate) push_message_count: Counter, + pub(crate) push_message_value_count: Counter, + pub(crate) push_response_count: Counter, + pub(crate) push_vote_read: Counter, + pub(crate) repair_peers: Counter, + pub(crate) require_stake_for_gossip_unknown_feature_set: Counter, + pub(crate) require_stake_for_gossip_unknown_stakes: Counter, + pub(crate) skip_pull_response_shred_version: Counter, + pub(crate) skip_pull_shred_version: Counter, + pub(crate) skip_push_message_shred_version: Counter, + pub(crate) trim_crds_table_failed: Counter, + pub(crate) trim_crds_table_purged_values_count: Counter, + pub(crate) tvu_peers: Counter, +} + +pub(crate) fn submit_gossip_stats( + stats: &GossipStats, + gossip: &RwLock, + stakes: &HashMap, +) { + let 
(table_size, num_nodes, purged_values_size, failed_inserts_size) = { + let gossip = gossip.read().unwrap(); + ( + gossip.crds.len(), + gossip.crds.num_nodes(), + gossip.crds.num_purged(), + gossip.pull.failed_inserts.len(), + ) + }; + let num_nodes_staked = stakes.values().filter(|stake| **stake > 0).count(); + datapoint_info!( + "cluster_info_stats", + ("entrypoint", stats.entrypoint.clear(), i64), + ("entrypoint2", stats.entrypoint2.clear(), i64), + ("push_vote_read", stats.push_vote_read.clear(), i64), + ("get_votes", stats.get_votes.clear(), i64), + ("get_accounts_hash", stats.get_accounts_hash.clear(), i64), + ("all_tvu_peers", stats.all_tvu_peers.clear(), i64), + ("tvu_peers", stats.tvu_peers.clear(), i64), + ( + "new_push_requests_num", + stats.new_push_requests_num.clear(), + i64 + ), + ("table_size", table_size as i64, i64), + ("purged_values_size", purged_values_size as i64, i64), + ("failed_inserts_size", failed_inserts_size as i64, i64), + ("num_nodes", num_nodes as i64, i64), + ("num_nodes_staked", num_nodes_staked as i64, i64), + ); + datapoint_info!( + "cluster_info_stats2", + ( + "gossip_packets_dropped_count", + stats.gossip_packets_dropped_count.clear(), + i64 + ), + ("repair_peers", stats.repair_peers.clear(), i64), + ("new_push_requests", stats.new_push_requests.clear(), i64), + ("new_push_requests2", stats.new_push_requests2.clear(), i64), + ("purge", stats.purge.clear(), i64), + ( + "process_gossip_packets_time", + stats.process_gossip_packets_time.clear(), + i64 + ), + ( + "handle_batch_ping_messages_time", + stats.handle_batch_ping_messages_time.clear(), + i64 + ), + ( + "handle_batch_pong_messages_time", + stats.handle_batch_pong_messages_time.clear(), + i64 + ), + ( + "handle_batch_prune_messages_time", + stats.handle_batch_prune_messages_time.clear(), + i64 + ), + ( + "handle_batch_pull_requests_time", + stats.handle_batch_pull_requests_time.clear(), + i64 + ), + ( + "handle_batch_pull_responses_time", + 
stats.handle_batch_pull_responses_time.clear(), + i64 + ), + ( + "handle_batch_push_messages_time", + stats.handle_batch_push_messages_time.clear(), + i64 + ), + ( + "process_pull_resp", + stats.process_pull_response.clear(), + i64 + ), + ("filter_pull_resp", stats.filter_pull_response.clear(), i64), + ( + "process_pull_resp_count", + stats.process_pull_response_count.clear(), + i64 + ), + ( + "pull_response_fail_insert", + stats.process_pull_response_fail_insert.clear(), + i64 + ), + ( + "pull_response_fail_timeout", + stats.process_pull_response_fail_timeout.clear(), + i64 + ), + ( + "pull_response_success", + stats.process_pull_response_success.clear(), + i64 + ), + ( + "process_pull_resp_timeout", + stats.process_pull_response_timeout.clear(), + i64 + ), + ( + "push_response_count", + stats.push_response_count.clear(), + i64 + ), + ); + datapoint_info!( + "cluster_info_stats3", + ( + "process_pull_resp_len", + stats.process_pull_response_len.clear(), + i64 + ), + ( + "process_pull_requests", + stats.process_pull_requests.clear(), + i64 + ), + ( + "pull_request_ping_pong_check_failed_count", + stats.pull_request_ping_pong_check_failed_count.clear(), + i64 + ), + ( + "new_pull_requests_pings_count", + stats.new_pull_requests_pings_count.clear(), + i64 + ), + ( + "generate_pull_responses", + stats.generate_pull_responses.clear(), + i64 + ), + ("process_prune", stats.process_prune.clear(), i64), + ( + "process_push_message", + stats.process_push_message.clear(), + i64 + ), + ( + "prune_received_cache", + stats.prune_received_cache.clear(), + i64 + ), + ("epoch_slots_lookup", stats.epoch_slots_lookup.clear(), i64), + ("new_pull_requests", stats.new_pull_requests.clear(), i64), + ("mark_pull_request", stats.mark_pull_request.clear(), i64), + ); + datapoint_info!( + "cluster_info_stats4", + ( + "skip_push_message_shred_version", + stats.skip_push_message_shred_version.clear(), + i64 + ), + ( + "skip_pull_response_shred_version", + 
stats.skip_pull_response_shred_version.clear(), + i64 + ), + ( + "skip_pull_shred_version", + stats.skip_pull_shred_version.clear(), + i64 + ), + ("push_message_count", stats.push_message_count.clear(), i64), + ( + "push_message_value_count", + stats.push_message_value_count.clear(), + i64 + ), + ( + "new_pull_requests_count", + stats.new_pull_requests_count.clear(), + i64 + ), + ( + "pull_from_entrypoint_count", + stats.pull_from_entrypoint_count.clear(), + i64 + ), + ( + "prune_message_count", + stats.prune_message_count.clear(), + i64 + ), + ("prune_message_len", stats.prune_message_len.clear(), i64), + ); + datapoint_info!( + "cluster_info_stats5", + ( + "pull_requests_count", + stats.pull_requests_count.clear(), + i64 + ), + ( + "packets_received_count", + stats.packets_received_count.clear(), + i64 + ), + ( + "packets_received_prune_messages_count", + stats.packets_received_prune_messages_count.clear(), + i64 + ), + ( + "packets_received_pull_requests_count", + stats.packets_received_pull_requests_count.clear(), + i64 + ), + ( + "packets_received_pull_responses_count", + stats.packets_received_pull_responses_count.clear(), + i64 + ), + ( + "packets_received_push_messages_count", + stats.packets_received_push_messages_count.clear(), + i64 + ), + ( + "packets_received_verified_count", + stats.packets_received_verified_count.clear(), + i64 + ), + ( + "packets_sent_gossip_requests_count", + stats.packets_sent_gossip_requests_count.clear(), + i64 + ), + ( + "packets_sent_prune_messages_count", + stats.packets_sent_prune_messages_count.clear(), + i64 + ), + ( + "packets_sent_pull_requests_count", + stats.packets_sent_pull_requests_count.clear(), + i64 + ), + ( + "packets_sent_pull_responses_count", + stats.packets_sent_pull_responses_count.clear(), + i64 + ), + ( + "packets_sent_push_messages_count", + stats.packets_sent_push_messages_count.clear(), + i64 + ), + ( + "require_stake_for_gossip_unknown_feature_set", + 
stats.require_stake_for_gossip_unknown_feature_set.clear(), + i64 + ), + ( + "require_stake_for_gossip_unknown_stakes", + stats.require_stake_for_gossip_unknown_stakes.clear(), + i64 + ), + ( + "trim_crds_table_failed", + stats.trim_crds_table_failed.clear(), + i64 + ), + ( + "trim_crds_table_purged_values_count", + stats.trim_crds_table_purged_values_count.clear(), + i64 + ), + ); +} diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index d3a143f7ae..20a4b15aed 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -1,9 +1,11 @@ use crate::{ cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS}, + crds::Cursor, crds_value::CrdsValueLabel, optimistic_confirmation_verifier::OptimisticConfirmationVerifier, optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, poh_recorder::PohRecorder, + replay_stage::DUPLICATE_THRESHOLD, result::{Error, Result}, rpc_subscriptions::RpcSubscriptions, sigverify, @@ -21,6 +23,7 @@ use solana_perf::packet::{self, Packets}; use solana_runtime::{ bank::Bank, bank_forks::BankForks, + commitment::VOTE_THRESHOLD_SIZE, epoch_stakes::{EpochAuthorizedVoters, EpochStakes}, stakes::Stakes, vote_sender_types::{ReplayVoteReceiver, ReplayedVote}, @@ -44,12 +47,20 @@ use std::{ }; // Map from a vote account to the authorized voter for an epoch +pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>; +pub type VotedHashUpdates = HashMap>; pub type VerifiedLabelVotePacketsSender = CrossbeamSender>; pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver>; pub type VerifiedVoteTransactionsSender = CrossbeamSender>; pub type VerifiedVoteTransactionsReceiver = CrossbeamReceiver>; pub type VerifiedVoteSender = CrossbeamSender<(Pubkey, Vec)>; pub type VerifiedVoteReceiver = CrossbeamReceiver<(Pubkey, Vec)>; +pub type GossipVerifiedVoteHashSender = CrossbeamSender<(Pubkey, Slot, Hash)>; +pub type GossipVerifiedVoteHashReceiver = 
CrossbeamReceiver<(Pubkey, Slot, Hash)>; +pub type GossipDuplicateConfirmedSlotsSender = CrossbeamSender; +pub type GossipDuplicateConfirmedSlotsReceiver = CrossbeamReceiver; + +const THRESHOLDS_TO_CHECK: [f64; 2] = [DUPLICATE_THRESHOLD, VOTE_THRESHOLD_SIZE]; #[derive(Default)] pub struct SlotVoteTracker { @@ -58,14 +69,13 @@ pub struct SlotVoteTracker { // True if seen on gossip, false if only seen in replay. voted: HashMap, optimistic_votes_tracker: HashMap, - updates: Option>, + voted_slot_updates: Option>, gossip_only_stake: u64, } impl SlotVoteTracker { - #[allow(dead_code)] - pub fn get_updates(&mut self) -> Option> { - self.updates.take() + pub fn get_voted_slot_updates(&mut self) -> Option> { + self.voted_slot_updates.take() } pub fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker { @@ -112,7 +122,7 @@ impl VoteTracker { let new_slot_tracker = Arc::new(RwLock::new(SlotVoteTracker { voted: HashMap::new(), optimistic_votes_tracker: HashMap::default(), - updates: None, + voted_slot_updates: None, gossip_only_stake: 0, })); self.slot_vote_trackers @@ -163,10 +173,10 @@ impl VoteTracker { let mut w_slot_vote_tracker = slot_vote_tracker.write().unwrap(); w_slot_vote_tracker.voted.insert(pubkey, true); - if let Some(ref mut updates) = w_slot_vote_tracker.updates { - updates.push(pubkey) + if let Some(ref mut voted_slot_updates) = w_slot_vote_tracker.voted_slot_updates { + voted_slot_updates.push(pubkey) } else { - w_slot_vote_tracker.updates = Some(vec![pubkey]); + w_slot_vote_tracker.voted_slot_updates = Some(vec![pubkey]); } } @@ -242,9 +252,11 @@ impl ClusterInfoVoteListener { bank_forks: Arc>, subscriptions: Arc, verified_vote_sender: VerifiedVoteSender, + gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, replay_votes_receiver: ReplayVoteReceiver, blockstore: Arc, bank_notification_sender: Option, + cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, ) -> Self { let exit_ = exit.clone(); @@ 
-287,10 +299,12 @@ impl ClusterInfoVoteListener { vote_tracker, bank_forks, subscriptions, + gossip_verified_vote_hash_sender, verified_vote_sender, replay_votes_receiver, blockstore, bank_notification_sender, + cluster_confirmed_slot_sender, ); }) .unwrap(); @@ -313,23 +327,18 @@ impl ClusterInfoVoteListener { verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender, verified_vote_transactions_sender: VerifiedVoteTransactionsSender, ) -> Result<()> { - let mut last_ts = 0; - loop { - if exit.load(Ordering::Relaxed) { - return Ok(()); - } - let (labels, votes, new_ts) = cluster_info.get_votes(last_ts); + let mut cursor = Cursor::default(); + while !exit.load(Ordering::Relaxed) { + let (labels, votes) = cluster_info.get_votes(&mut cursor); inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len()); - - last_ts = new_ts; if !votes.is_empty() { let (vote_txs, packets) = Self::verify_votes(votes, labels); verified_vote_transactions_sender.send(vote_txs)?; verified_vote_label_packets_sender.send(packets)?; } - sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS)); } + Ok(()) } #[allow(clippy::type_complexity)] @@ -406,20 +415,24 @@ impl ClusterInfoVoteListener { } } + #[allow(clippy::too_many_arguments)] fn process_votes_loop( exit: Arc, gossip_vote_txs_receiver: VerifiedVoteTransactionsReceiver, vote_tracker: Arc, bank_forks: Arc>, subscriptions: Arc, + gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, verified_vote_sender: VerifiedVoteSender, replay_votes_receiver: ReplayVoteReceiver, blockstore: Arc, bank_notification_sender: Option, + cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, ) -> Result<()> { let mut confirmation_verifier = OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root()); let mut last_process_root = Instant::now(); + let cluster_confirmed_slot_sender = Some(cluster_confirmed_slot_sender); loop { if exit.load(Ordering::Relaxed) { return Ok(()); @@ -445,13 +458,16 @@ impl 
ClusterInfoVoteListener { &vote_tracker, &root_bank, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &bank_notification_sender, + &cluster_confirmed_slot_sender, ); match confirmed_slots { Ok(confirmed_slots) => { - confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots); + confirmation_verifier + .add_new_optimistic_confirmed_slots(confirmed_slots.clone()); } Err(e) => match e { Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) @@ -470,17 +486,20 @@ impl ClusterInfoVoteListener { vote_tracker: &VoteTracker, root_bank: &Bank, subscriptions: &RpcSubscriptions, + gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, replay_votes_receiver: &ReplayVoteReceiver, - ) -> Result> { + ) -> Result { Self::listen_and_confirm_votes( gossip_vote_txs_receiver, vote_tracker, root_bank, subscriptions, + gossip_verified_vote_hash_sender, verified_vote_sender, replay_votes_receiver, &None, + &None, ) } @@ -489,10 +508,12 @@ impl ClusterInfoVoteListener { vote_tracker: &VoteTracker, root_bank: &Bank, subscriptions: &RpcSubscriptions, + gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, replay_votes_receiver: &ReplayVoteReceiver, bank_notification_sender: &Option, - ) -> Result> { + cluster_confirmed_slot_sender: &Option, + ) -> Result { let mut sel = Select::new(); sel.recv(gossip_vote_txs_receiver); sel.recv(replay_votes_receiver); @@ -519,8 +540,10 @@ impl ClusterInfoVoteListener { replay_votes, root_bank, subscriptions, + gossip_verified_vote_hash_sender, verified_vote_sender, bank_notification_sender, + cluster_confirmed_slot_sender, )); } else { remaining_wait_time = remaining_wait_time @@ -538,10 +561,12 @@ impl ClusterInfoVoteListener { root_bank: &Bank, subscriptions: &RpcSubscriptions, verified_vote_sender: &VerifiedVoteSender, + gossip_verified_vote_hash_sender: 
&GossipVerifiedVoteHashSender, diff: &mut HashMap>, - new_optimistic_confirmed_slots: &mut Vec<(Slot, Hash)>, + new_optimistic_confirmed_slots: &mut ThresholdConfirmedSlots, is_gossip_vote: bool, bank_notification_sender: &Option, + cluster_confirmed_slot_sender: &Option, ) { if vote.slots.is_empty() { return; @@ -577,7 +602,7 @@ impl ClusterInfoVoteListener { // Fast track processing of the last slot in a vote transactions // so that notifications for optimistic confirmation can be sent // as soon as possible. - let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote( + let (reached_threshold_results, is_new) = Self::track_optimistic_confirmation_vote( vote_tracker, last_vote_slot, last_vote_hash, @@ -586,7 +611,20 @@ impl ClusterInfoVoteListener { total_stake, ); - if is_confirmed { + if is_gossip_vote && is_new && stake > 0 { + let _ = gossip_verified_vote_hash_sender.send(( + *vote_pubkey, + last_vote_slot, + last_vote_hash, + )); + } + + if reached_threshold_results[0] { + if let Some(sender) = cluster_confirmed_slot_sender { + let _ = sender.send(vec![(last_vote_slot, last_vote_hash)]); + } + } + if reached_threshold_results[1] { new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash)); // Notify subscribers about new optimistic confirmation if let Some(sender) = bank_notification_sender { @@ -668,9 +706,11 @@ impl ClusterInfoVoteListener { replayed_votes: Vec, root_bank: &Bank, subscriptions: &RpcSubscriptions, + gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender, verified_vote_sender: &VerifiedVoteSender, bank_notification_sender: &Option, - ) -> Vec<(Slot, Hash)> { + cluster_confirmed_slot_sender: &Option, + ) -> ThresholdConfirmedSlots { let mut diff: HashMap> = HashMap::new(); let mut new_optimistic_confirmed_slots = vec![]; @@ -693,10 +733,12 @@ impl ClusterInfoVoteListener { root_bank, subscriptions, verified_vote_sender, + gossip_verified_vote_hash_sender, &mut diff, &mut new_optimistic_confirmed_slots, 
is_gossip, bank_notification_sender, + cluster_confirmed_slot_sender, ); } @@ -717,8 +759,8 @@ impl ClusterInfoVoteListener { }); } let mut w_slot_tracker = slot_tracker.write().unwrap(); - if w_slot_tracker.updates.is_none() { - w_slot_tracker.updates = Some(vec![]); + if w_slot_tracker.voted_slot_updates.is_none() { + w_slot_tracker.voted_slot_updates = Some(vec![]); } let mut gossip_only_stake = 0; let epoch = root_bank.epoch_schedule().get_epoch(slot); @@ -739,7 +781,11 @@ impl ClusterInfoVoteListener { // `is_new || is_new_from_gossip`. In both cases we want to record // `is_new_from_gossip` for the `pubkey` entry. w_slot_tracker.voted.insert(pubkey, seen_in_gossip_above); - w_slot_tracker.updates.as_mut().unwrap().push(pubkey); + w_slot_tracker + .voted_slot_updates + .as_mut() + .unwrap() + .push(pubkey); } w_slot_tracker.gossip_only_stake += gossip_only_stake @@ -756,14 +802,14 @@ impl ClusterInfoVoteListener { pubkey: Pubkey, stake: u64, total_epoch_stake: u64, - ) -> (bool, bool) { + ) -> (Vec, bool) { let slot_tracker = vote_tracker.get_or_insert_slot_tracker(slot); // Insert vote and check for optimistic confirmation let mut w_slot_tracker = slot_tracker.write().unwrap(); w_slot_tracker .get_or_insert_optimistic_votes_tracker(hash) - .add_vote_pubkey(pubkey, stake, total_epoch_stake) + .add_vote_pubkey(pubkey, stake, total_epoch_stake, &THRESHOLDS_TO_CHECK) } fn sum_stake(sum: &mut u64, epoch_stakes: Option<&EpochStakes>, pubkey: &Pubkey) { @@ -972,6 +1018,7 @@ mod tests { let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup(); let (votes_sender, votes_receiver) = unbounded(); let (verified_vote_sender, _verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (replay_votes_sender, replay_votes_receiver) = unbounded(); let GenesisConfigInfo { genesis_config, .. 
} = @@ -1002,9 +1049,11 @@ mod tests { &vote_tracker, &bank3, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &None, + &None, ) .unwrap(); @@ -1031,9 +1080,11 @@ mod tests { &vote_tracker, &bank3, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &None, + &None, ) .unwrap(); @@ -1082,6 +1133,7 @@ mod tests { let (vote_tracker, _, validator_voting_keypairs, subscriptions) = setup(); let (votes_txs_sender, votes_txs_receiver) = unbounded(); let (replay_votes_sender, replay_votes_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); let GenesisConfigInfo { genesis_config, .. } = @@ -1109,17 +1161,52 @@ mod tests { &vote_tracker, &bank0, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &None, + &None, ) .unwrap(); + let mut gossip_verified_votes: HashMap>> = HashMap::new(); + for (pubkey, slot, hash) in gossip_verified_vote_hash_receiver.try_iter() { + // send_vote_txs() will send each vote twice, but we should only get a notification + // once for each via this channel + let exists = gossip_verified_votes + .get(&slot) + .and_then(|slot_hashes| slot_hashes.get(&hash)) + .map(|slot_hash_voters| slot_hash_voters.contains(&pubkey)) + .unwrap_or(false); + assert!(!exists); + gossip_verified_votes + .entry(slot) + .or_default() + .entry(hash) + .or_default() + .push(pubkey); + } + + // Only the last vote in the `gossip_vote` set should count towards + // the `voted_hash_updates` set. 
Important to note here that replay votes + // should not count + let last_gossip_vote_slot = *gossip_vote_slots.last().unwrap(); + assert_eq!(gossip_verified_votes.len(), 1); + let slot_hashes = gossip_verified_votes.get(&last_gossip_vote_slot).unwrap(); + assert_eq!(slot_hashes.len(), 1); + let slot_hash_votes = slot_hashes.get(&Hash::default()).unwrap(); + assert_eq!(slot_hash_votes.len(), validator_voting_keypairs.len()); + for voting_keypairs in &validator_voting_keypairs { + let pubkey = voting_keypairs.vote_keypair.pubkey(); + assert!(slot_hash_votes.contains(&pubkey)); + } + // Check that the received votes were pushed to other commponents // subscribing via `verified_vote_receiver` let all_expected_slots: BTreeSet<_> = gossip_vote_slots + .clone() .into_iter() - .chain(replay_vote_slots.into_iter()) + .chain(replay_vote_slots.clone().into_iter()) .collect(); let mut pubkey_to_votes: HashMap> = HashMap::new(); for (received_pubkey, new_votes) in verified_vote_receiver.try_iter() { @@ -1147,15 +1234,17 @@ mod tests { let pubkey = voting_keypairs.vote_keypair.pubkey(); assert!(r_slot_vote_tracker.voted.contains_key(&pubkey)); assert!(r_slot_vote_tracker - .updates + .voted_slot_updates .as_ref() .unwrap() .contains(&Arc::new(pubkey))); - // Only the last vote in the stack of `gossip_votes` should count towards - // the `optimistic` vote set. 
+ // Only the last vote in the stack of `gossip_vote` and `replay_vote_slots` + // should count towards the `optimistic` vote set, let optimistic_votes_tracker = r_slot_vote_tracker.optimistic_votes_tracker(&Hash::default()); - if vote_slot == 2 || vote_slot == 4 { + if vote_slot == *gossip_vote_slots.last().unwrap() + || vote_slot == *replay_vote_slots.last().unwrap() + { let optimistic_votes_tracker = optimistic_votes_tracker.unwrap(); assert!(optimistic_votes_tracker.voted().contains(&pubkey)); assert_eq!( @@ -1192,6 +1281,7 @@ mod tests { // Send some votes to process let (votes_txs_sender, votes_txs_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); let (_replay_votes_sender, replay_votes_receiver) = unbounded(); @@ -1228,9 +1318,11 @@ mod tests { &vote_tracker, &bank0, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &None, + &None, ) .unwrap(); @@ -1252,7 +1344,7 @@ mod tests { let pubkey = voting_keypairs.vote_keypair.pubkey(); assert!(r_slot_vote_tracker.voted.contains_key(&pubkey)); assert!(r_slot_vote_tracker - .updates + .voted_slot_updates .as_ref() .unwrap() .contains(&Arc::new(pubkey))); @@ -1273,6 +1365,7 @@ mod tests { fn run_test_process_votes3(switch_proof_hash: Option) { let (votes_sender, votes_receiver) = unbounded(); let (verified_vote_sender, _verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (replay_votes_sender, replay_votes_receiver) = unbounded(); let vote_slot = 1; @@ -1323,34 +1416,29 @@ mod tests { &vote_tracker, &bank, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &replay_votes_receiver, &None, + &None, ); } let slot_vote_tracker = vote_tracker.get_slot_vote_tracker(vote_slot).unwrap(); let r_slot_vote_tracker = 
&slot_vote_tracker.read().unwrap(); + assert_eq!( + r_slot_vote_tracker + .optimistic_votes_tracker(&vote_bank_hash) + .unwrap() + .stake(), + 100 + ); if events == vec![1] { // Check `gossip_only_stake` is not incremented - assert_eq!( - r_slot_vote_tracker - .optimistic_votes_tracker(&vote_bank_hash) - .unwrap() - .stake(), - 100 - ); assert_eq!(r_slot_vote_tracker.gossip_only_stake, 0); } else { // Check that both the `gossip_only_stake` and `total_voted_stake` both // increased - assert_eq!( - r_slot_vote_tracker - .optimistic_votes_tracker(&vote_bank_hash) - .unwrap() - .stake(), - 100 - ); assert_eq!(r_slot_vote_tracker.gossip_only_stake, 100); } } @@ -1457,6 +1545,7 @@ mod tests { )]; let (verified_vote_sender, _verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); ClusterInfoVoteListener::filter_and_confirm_with_new_votes( &vote_tracker, vote_tx, @@ -1468,8 +1557,10 @@ mod tests { )], &bank, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &None, + &None, ); // Setup next epoch @@ -1522,8 +1613,10 @@ mod tests { )], &new_root_bank, &subscriptions, + &gossip_verified_vote_hash_sender, &verified_vote_sender, &None, + &None, ); } diff --git a/core/src/cluster_slot_state_verifier.rs b/core/src/cluster_slot_state_verifier.rs new file mode 100644 index 0000000000..5099b6487c --- /dev/null +++ b/core/src/cluster_slot_state_verifier.rs @@ -0,0 +1,893 @@ +use crate::{ + fork_choice::ForkChoice, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, + progress_map::ProgressMap, +}; +use solana_sdk::{clock::Slot, hash::Hash}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; + +pub(crate) type DuplicateSlotsTracker = BTreeSet; +pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap; +type SlotStateHandler = fn(Slot, &Hash, Option<&Hash>, bool, bool) -> Vec; + +#[derive(PartialEq, Debug)] +pub enum SlotStateUpdate { + Frozen, + DuplicateConfirmed, + 
Dead, + Duplicate, +} + +#[derive(PartialEq, Debug)] +pub enum ResultingStateChange { + // Hash of our current frozen version of the slot + MarkSlotDuplicate(Hash), + // Hash of the cluster confirmed slot that is not equivalent + // to our frozen version of the slot + RepairDuplicateConfirmedVersion(Hash), + // Hash of our current frozen version of the slot + DuplicateConfirmedSlotMatchesCluster(Hash), +} + +impl SlotStateUpdate { + fn to_handler(&self) -> SlotStateHandler { + match self { + SlotStateUpdate::Dead => on_dead_slot, + SlotStateUpdate::Frozen => on_frozen_slot, + SlotStateUpdate::DuplicateConfirmed => on_cluster_update, + SlotStateUpdate::Duplicate => on_cluster_update, + } + } +} + +fn repair_correct_version(_slot: Slot, _hash: &Hash) {} + +fn on_dead_slot( + slot: Slot, + bank_frozen_hash: &Hash, + cluster_duplicate_confirmed_hash: Option<&Hash>, + _is_slot_duplicate: bool, + is_dead: bool, +) -> Vec { + assert!(is_dead); + // Bank should not have been frozen if the slot was marked dead + assert_eq!(*bank_frozen_hash, Hash::default()); + if let Some(cluster_duplicate_confirmed_hash) = cluster_duplicate_confirmed_hash { + // If the cluster duplicate_confirmed some version of this slot, then + // there's another version + warn!( + "Cluster duplicate_confirmed slot {} with hash {}, but we marked slot dead", + slot, cluster_duplicate_confirmed_hash + ); + // No need to check `is_slot_duplicate` and modify fork choice as dead slots + // are never frozen, and thus never added to fork choice. 
The state change for + // `MarkSlotDuplicate` will try to modify fork choice, but won't find the slot + // in the fork choice tree, so is equivalent to a no-op + return vec![ + ResultingStateChange::MarkSlotDuplicate(Hash::default()), + ResultingStateChange::RepairDuplicateConfirmedVersion( + *cluster_duplicate_confirmed_hash, + ), + ]; + } + + vec![] +} + +fn on_frozen_slot( + slot: Slot, + bank_frozen_hash: &Hash, + cluster_duplicate_confirmed_hash: Option<&Hash>, + is_slot_duplicate: bool, + is_dead: bool, +) -> Vec { + // If a slot is marked frozen, the bank hash should not be default, + // and the slot should not be dead + assert!(*bank_frozen_hash != Hash::default()); + assert!(!is_dead); + + if let Some(cluster_duplicate_confirmed_hash) = cluster_duplicate_confirmed_hash { + // If the cluster duplicate_confirmed some version of this slot, then + // confirm our version agrees with the cluster, + if cluster_duplicate_confirmed_hash != bank_frozen_hash { + // If the versions do not match, modify fork choice rule + // to exclude our version from being voted on and also + // repair correct version + warn!( + "Cluster duplicate_confirmed slot {} with hash {}, but we froze slot with hash {}", + slot, cluster_duplicate_confirmed_hash, bank_frozen_hash + ); + return vec![ + ResultingStateChange::MarkSlotDuplicate(*bank_frozen_hash), + ResultingStateChange::RepairDuplicateConfirmedVersion( + *cluster_duplicate_confirmed_hash, + ), + ]; + } else { + // If the versions match, then add the slot to the candidate + // set to account for the case where it was removed earlier + // by the `on_duplicate_slot()` handler + return vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster( + *bank_frozen_hash, + )]; + } + } + + if is_slot_duplicate { + // If we detected a duplicate, but have not yet seen any version + // of the slot duplicate_confirmed (i.e. block above did not execute), then + // remove the slot from fork choice until we get confirmation. 
+ + // If we get here, we either detected duplicate from + // 1) WindowService + // 2) A gossip duplicate_confirmed version that didn't match our frozen + // version. + // In both cases, mark the progress map for this slot as duplicate + return vec![ResultingStateChange::MarkSlotDuplicate(*bank_frozen_hash)]; + } + + vec![] +} + +// Called when we receive either: +// 1) A duplicate slot signal from WindowStage, +// 2) Confirmation of a slot by observing votes from replay or gossip. +// +// This signals external information about this slot, which affects +// this validator's understanding of the validity of this slot +fn on_cluster_update( + slot: Slot, + bank_frozen_hash: &Hash, + cluster_duplicate_confirmed_hash: Option<&Hash>, + is_slot_duplicate: bool, + is_dead: bool, +) -> Vec { + if is_dead { + on_dead_slot( + slot, + bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead, + ) + } else if *bank_frozen_hash != Hash::default() { + // This case is mutually exclusive with is_dead case above because if a slot is dead, + // it cannot have been frozen, and thus cannot have a non-default bank hash. + on_frozen_slot( + slot, + bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead, + ) + } else { + vec![] + } +} + +fn get_cluster_duplicate_confirmed_hash<'a>( + slot: Slot, + gossip_duplicate_confirmed_hash: Option<&'a Hash>, + local_frozen_hash: &'a Hash, + is_local_replay_duplicate_confirmed: bool, +) -> Option<&'a Hash> { + let local_duplicate_confirmed_hash = if is_local_replay_duplicate_confirmed { + // If local replay has duplicate_confirmed this slot, this slot must have + // descendants with votes for this slot, hence this slot must be + // frozen. 
+ assert!(*local_frozen_hash != Hash::default()); + Some(local_frozen_hash) + } else { + None + }; + + match ( + local_duplicate_confirmed_hash, + gossip_duplicate_confirmed_hash, + ) { + (Some(local_duplicate_confirmed_hash), Some(gossip_duplicate_confirmed_hash)) => { + if local_duplicate_confirmed_hash != gossip_duplicate_confirmed_hash { + error!( + "For slot {}, the gossip duplicate confirmed hash {}, is not equal + to the confirmed hash we replayed: {}", + slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash + ); + } + Some(&local_frozen_hash) + } + (Some(local_frozen_hash), None) => Some(local_frozen_hash), + _ => gossip_duplicate_confirmed_hash, + } +} + +fn apply_state_changes( + slot: Slot, + progress: &mut ProgressMap, + fork_choice: &mut HeaviestSubtreeForkChoice, + ancestors: &HashMap>, + descendants: &HashMap>, + state_changes: Vec, +) { + for state_change in state_changes { + match state_change { + ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash) => { + progress.set_unconfirmed_duplicate_slot( + slot, + descendants.get(&slot).unwrap_or(&HashSet::default()), + ); + fork_choice.mark_fork_invalid_candidate(&(slot, bank_frozen_hash)); + } + ResultingStateChange::RepairDuplicateConfirmedVersion( + cluster_duplicate_confirmed_hash, + ) => { + // TODO: Should consider moving the updating of the duplicate slots in the + // progress map from ReplayStage::confirm_forks to here. 
+ repair_correct_version(slot, &cluster_duplicate_confirmed_hash); + } + ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(bank_frozen_hash) => { + progress.set_confirmed_duplicate_slot( + slot, + ancestors.get(&slot).unwrap_or(&HashSet::default()), + descendants.get(&slot).unwrap_or(&HashSet::default()), + ); + fork_choice.mark_fork_valid_candidate(&(slot, bank_frozen_hash)); + } + } + } +} + +#[allow(clippy::too_many_arguments)] +pub(crate) fn check_slot_agrees_with_cluster( + slot: Slot, + root: Slot, + frozen_hash: Option, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + ancestors: &HashMap>, + descendants: &HashMap>, + progress: &mut ProgressMap, + fork_choice: &mut HeaviestSubtreeForkChoice, + slot_state_update: SlotStateUpdate, +) { + info!( + "check_slot_agrees_with_cluster() + slot: {}, + root: {}, + frozen_hash: {:?}, + update: {:?}", + slot, root, frozen_hash, slot_state_update + ); + + if slot <= root { + return; + } + + // Needs to happen before the frozen_hash.is_none() check below to account for duplicate + // signals arriving before the bank is constructed in replay. 
+ if matches!(slot_state_update, SlotStateUpdate::Duplicate) { + // If this slot has already been processed before, return + if !duplicate_slots_tracker.insert(slot) { + return; + } + } + + if frozen_hash.is_none() { + // If the bank doesn't even exist in BankForks yet, + // then there's nothing to do as replay of the slot + // hasn't even started + return; + } + + let frozen_hash = frozen_hash.unwrap(); + let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot); + + let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map"); + let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash( + slot, + gossip_duplicate_confirmed_hash, + &frozen_hash, + is_local_replay_duplicate_confirmed, + ); + let is_slot_duplicate = duplicate_slots_tracker.contains(&slot); + let is_dead = progress.is_dead(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map"); + + info!( + "check_slot_agrees_with_cluster() state + is_local_replay_duplicate_confirmed: {:?}, + cluster_duplicate_confirmed_hash: {:?}, + is_slot_duplicate: {:?}, + is_dead: {:?}", + is_local_replay_duplicate_confirmed, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead, + ); + + let state_handler = slot_state_update.to_handler(); + let state_changes = state_handler( + slot, + &frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead, + ); + apply_state_changes( + slot, + progress, + fork_choice, + ancestors, + descendants, + state_changes, + ); +} + +#[cfg(test)] +mod test { + use super::*; + use crate::consensus::test::VoteSimulator; + use solana_runtime::bank_forks::BankForks; + use std::sync::RwLock; + use trees::tr; + + struct InitialState { + heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice, + progress: ProgressMap, + ancestors: HashMap>, + descendants: 
HashMap>, + slot: Slot, + bank_forks: RwLock, + } + + fn setup() -> InitialState { + // Create simple fork 0 -> 1 -> 2 -> 3 + let forks = tr(0) / (tr(1) / (tr(2) / tr(3))); + let mut vote_simulator = VoteSimulator::new(1); + vote_simulator.fill_bank_forks(forks, &HashMap::new()); + let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); + + let descendants = vote_simulator + .bank_forks + .read() + .unwrap() + .descendants() + .clone(); + + InitialState { + heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice, + progress: vote_simulator.progress, + ancestors, + descendants, + slot: 0, + bank_forks: vote_simulator.bank_forks, + } + } + + #[test] + fn test_frozen_duplicate() { + // Common state + let slot = 0; + let cluster_duplicate_confirmed_hash = None; + let is_dead = false; + + // Slot is not detected as duplicate yet + let mut is_slot_duplicate = false; + + // Simulate freezing the bank, add a + // new non-default hash, should return + // no actionable state changes yet + let bank_frozen_hash = Hash::new_unique(); + assert!(on_frozen_slot( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ) + .is_empty()); + + // Now mark the slot as duplicate, should + // trigger marking the slot as a duplicate + is_slot_duplicate = true; + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash)] + ); + } + + #[test] + fn test_frozen_duplicate_confirmed() { + // Common state + let slot = 0; + let is_slot_duplicate = false; + let is_dead = false; + + // No cluster duplicate_confirmed hash yet + let mut cluster_duplicate_confirmed_hash = None; + + // Simulate freezing the bank, add a + // new non-default hash, should return + // no actionable state changes + let bank_frozen_hash = Hash::new_unique(); + assert!(on_frozen_slot( + slot, + &bank_frozen_hash, + 
cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ) + .is_empty()); + + // Now mark the same frozen slot hash as duplicate_confirmed by the cluster, + // should just confirm the slot + cluster_duplicate_confirmed_hash = Some(&bank_frozen_hash); + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster( + bank_frozen_hash + ),] + ); + + // If the cluster_duplicate_confirmed_hash does not match, then we + // should trigger marking the slot as a duplicate, and also + // try to repair correct version + let mismatched_hash = Hash::new_unique(); + cluster_duplicate_confirmed_hash = Some(&mismatched_hash); + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ + ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash), + ResultingStateChange::RepairDuplicateConfirmedVersion(mismatched_hash), + ] + ); + } + + #[test] + fn test_duplicate_frozen_duplicate_confirmed() { + // Common state + let slot = 0; + let is_dead = false; + let is_slot_duplicate = true; + + // Bank is not frozen yet + let mut cluster_duplicate_confirmed_hash = None; + let mut bank_frozen_hash = Hash::default(); + + // Mark the slot as duplicate. Because our version of the slot is not + // frozen yet, we don't know which version we have, so no action is + // taken. + assert!(on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ) + .is_empty()); + + // Freeze the bank, should now mark the slot as duplicate since we have + // not seen confirmation yet. 
+ bank_frozen_hash = Hash::new_unique(); + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash),] + ); + + // If the cluster_duplicate_confirmed_hash matches, we just confirm + // the slot + cluster_duplicate_confirmed_hash = Some(&bank_frozen_hash); + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster( + bank_frozen_hash + ),] + ); + + // If the cluster_duplicate_confirmed_hash does not match, then we + // should trigger marking the slot as a duplicate, and also + // try to repair correct version + let mismatched_hash = Hash::new_unique(); + cluster_duplicate_confirmed_hash = Some(&mismatched_hash); + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ + ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash), + ResultingStateChange::RepairDuplicateConfirmedVersion(mismatched_hash), + ] + ); + } + + #[test] + fn test_duplicate_duplicate_confirmed() { + let slot = 0; + let correct_hash = Hash::new_unique(); + let cluster_duplicate_confirmed_hash = Some(&correct_hash); + let is_dead = false; + // Bank is not frozen yet + let bank_frozen_hash = Hash::default(); + + // Because our version of the slot is not frozen yet, then even though + // the cluster has duplicate_confirmed a hash, we don't know which version we + // have, so no action is taken. 
+ let is_slot_duplicate = true; + assert!(on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ) + .is_empty()); + } + + #[test] + fn test_duplicate_dead() { + let slot = 0; + let cluster_duplicate_confirmed_hash = None; + let is_dead = true; + // Bank is not frozen yet + let bank_frozen_hash = Hash::default(); + + // Even though our version of the slot is dead, the cluster has not + // duplicate_confirmed a hash, we don't know which version we have, so no action + // is taken. + let is_slot_duplicate = true; + assert!(on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ) + .is_empty()); + } + + #[test] + fn test_duplicate_confirmed_dead_duplicate() { + let slot = 0; + let correct_hash = Hash::new_unique(); + // Cluster has duplicate_confirmed some version of the slot + let cluster_duplicate_confirmed_hash = Some(&correct_hash); + // Our version of the slot is dead + let is_dead = true; + let bank_frozen_hash = Hash::default(); + + // Even if the duplicate signal hasn't come in yet, + // we can deduce the slot is duplicate AND we have, + // the wrong version, so should mark the slot as duplicate, + // and repair the correct version + let mut is_slot_duplicate = false; + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ + ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash), + ResultingStateChange::RepairDuplicateConfirmedVersion(correct_hash), + ] + ); + + // If the duplicate signal comes in, nothing should change + is_slot_duplicate = true; + assert_eq!( + on_cluster_update( + slot, + &bank_frozen_hash, + cluster_duplicate_confirmed_hash, + is_slot_duplicate, + is_dead + ), + vec![ + ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash), + ResultingStateChange::RepairDuplicateConfirmedVersion(correct_hash), + ] + ); + } + + #[test] + fn 
test_apply_state_changes() { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + mut progress, + ancestors, + descendants, + slot, + bank_forks, + } = setup(); + + // MarkSlotDuplicate should mark progress map and remove + // the slot from fork choice + let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash(); + apply_state_changes( + slot, + &mut progress, + &mut heaviest_subtree_fork_choice, + &ancestors, + &descendants, + vec![ResultingStateChange::MarkSlotDuplicate(slot_hash)], + ); + assert!(!heaviest_subtree_fork_choice + .is_candidate_slot(&(slot, slot_hash)) + .unwrap()); + for child_slot in descendants + .get(&slot) + .unwrap() + .iter() + .chain(std::iter::once(&slot)) + { + assert_eq!( + progress + .latest_unconfirmed_duplicate_ancestor(*child_slot) + .unwrap(), + slot + ); + } + + // DuplicateConfirmedSlotMatchesCluster should re-enable fork choice + apply_state_changes( + slot, + &mut progress, + &mut heaviest_subtree_fork_choice, + &ancestors, + &descendants, + vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster( + slot_hash, + )], + ); + for child_slot in descendants + .get(&slot) + .unwrap() + .iter() + .chain(std::iter::once(&slot)) + { + assert!(progress + .latest_unconfirmed_duplicate_ancestor(*child_slot) + .is_none()); + } + assert!(heaviest_subtree_fork_choice + .is_candidate_slot(&(slot, slot_hash)) + .unwrap()); + } + + fn run_test_state_duplicate_then_bank_frozen(initial_bank_hash: Option) { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + mut progress, + ancestors, + descendants, + bank_forks, + .. + } = setup(); + + // Setup a duplicate slot state transition with the initial bank state of the duplicate slot + // determined by `initial_bank_hash`, which can be: + // 1) A default hash (unfrozen bank), + // 2) None (a slot that hasn't even started replay yet). 
+ let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let duplicate_slot = 2; + check_slot_agrees_with_cluster( + duplicate_slot, + root, + initial_bank_hash, + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + + // Now freeze the bank + let frozen_duplicate_slot_hash = bank_forks + .read() + .unwrap() + .get(duplicate_slot) + .unwrap() + .hash(); + check_slot_agrees_with_cluster( + duplicate_slot, + root, + Some(frozen_duplicate_slot_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Frozen, + ); + + // Progress map should have the correct updates, fork choice should mark duplicate + // as unvotable + assert!(progress.is_unconfirmed_duplicate(duplicate_slot).unwrap()); + + // The ancestor of the duplicate slot should be the best slot now + let (duplicate_ancestor, duplicate_parent_hash) = { + let r_bank_forks = bank_forks.read().unwrap(); + let parent_bank = r_bank_forks.get(duplicate_slot).unwrap().parent().unwrap(); + (parent_bank.slot(), parent_bank.hash()) + }; + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (duplicate_ancestor, duplicate_parent_hash) + ); + } + + #[test] + fn test_state_unfrozen_bank_duplicate_then_bank_frozen() { + run_test_state_duplicate_then_bank_frozen(Some(Hash::default())); + } + + #[test] + fn test_state_unreplayed_bank_duplicate_then_bank_frozen() { + run_test_state_duplicate_then_bank_frozen(None); + } + + #[test] + fn test_state_ancestor_confirmed_descendant_duplicate() { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + mut progress, + ancestors, + descendants, + bank_forks, + .. 
+ } = setup(); + + let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + + // Mark slot 2 as duplicate confirmed + let slot2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); + gossip_duplicate_confirmed_slots.insert(2, slot2_hash); + check_slot_agrees_with_cluster( + 2, + root, + Some(slot2_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + + // Mark 3 as duplicate, should not remove the duplicate confirmed slot 2 from + // fork choice + check_slot_agrees_with_cluster( + 3, + root, + Some(slot3_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (2, slot2_hash) + ); + } + + #[test] + fn test_state_ancestor_duplicate_descendant_confirmed() { + // Common state + let InitialState { + mut heaviest_subtree_fork_choice, + mut progress, + ancestors, + descendants, + bank_forks, + .. 
+ } = setup(); + + let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + let root = 0; + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + + // Mark 2 as duplicate + check_slot_agrees_with_cluster( + 2, + root, + Some(bank_forks.read().unwrap().get(2).unwrap().hash()), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + + let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (1, slot1_hash) + ); + + // Mark slot 3 as duplicate confirmed, should mark slot 2 as duplicate confirmed as well + gossip_duplicate_confirmed_slots.insert(3, slot3_hash); + check_slot_agrees_with_cluster( + 3, + root, + Some(slot3_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (3, slot3_hash) + ); + } +} diff --git a/core/src/cluster_slots.rs b/core/src/cluster_slots.rs index 7c3beeb408..82e940331f 100644 --- a/core/src/cluster_slots.rs +++ b/core/src/cluster_slots.rs @@ -1,5 +1,5 @@ use crate::{ - cluster_info::ClusterInfo, contact_info::ContactInfo, epoch_slots::EpochSlots, + cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots, serve_repair::RepairType, }; use itertools::Itertools; @@ -7,10 +7,7 @@ use solana_runtime::{bank_forks::BankForks, epoch_stakes::NodeIdToVoteAccounts}; use solana_sdk::{clock::Slot, pubkey::Pubkey}; use std::{ collections::{BTreeMap, HashMap, HashSet}, - sync::{ - 
atomic::{AtomicU64, Ordering}, - Arc, RwLock, - }, + sync::{Arc, Mutex, RwLock}, }; // Limit the size of cluster-slots map in case @@ -22,22 +19,26 @@ pub type SlotPubkeys = HashMap; #[derive(Default)] pub struct ClusterSlots { cluster_slots: RwLock>>>, - since: AtomicU64, validator_stakes: RwLock>, epoch: RwLock>, + cursor: Mutex, } impl ClusterSlots { pub fn lookup(&self, slot: Slot) -> Option>> { self.cluster_slots.read().unwrap().get(&slot).cloned() } + pub fn update(&self, root: Slot, cluster_info: &ClusterInfo, bank_forks: &RwLock) { self.update_peers(bank_forks); - let since = self.since.load(Ordering::Relaxed); - let (epoch_slots, since) = cluster_info.get_epoch_slots_since(since); - self.update_internal(root, epoch_slots, since); + let epoch_slots = { + let mut cursor = self.cursor.lock().unwrap(); + cluster_info.get_epoch_slots(&mut cursor) + }; + self.update_internal(root, epoch_slots); } - fn update_internal(&self, root: Slot, epoch_slots_list: Vec, since: Option) { + + fn update_internal(&self, root: Slot, epoch_slots_list: Vec) { // Attach validator's total stake. 
let epoch_slots_list: Vec<_> = { let validator_stakes = self.validator_stakes.read().unwrap(); @@ -86,9 +87,6 @@ impl ClusterSlots { cluster_slots.split_off(&key); } } - if let Some(since) = since { - self.since.store(since + 1, Ordering::Relaxed); - } } pub fn collect(&self, id: &Pubkey) -> HashSet { @@ -206,23 +204,20 @@ mod tests { fn test_default() { let cs = ClusterSlots::default(); assert!(cs.cluster_slots.read().unwrap().is_empty()); - assert_eq!(cs.since.load(Ordering::Relaxed), 0); } #[test] fn test_update_noop() { let cs = ClusterSlots::default(); - cs.update_internal(0, vec![], None); + cs.update_internal(0, vec![]); assert!(cs.cluster_slots.read().unwrap().is_empty()); - assert_eq!(cs.since.load(Ordering::Relaxed), 0); } #[test] fn test_update_empty() { let cs = ClusterSlots::default(); let epoch_slot = EpochSlots::default(); - cs.update_internal(0, vec![epoch_slot], Some(0)); - assert_eq!(cs.since.load(Ordering::Relaxed), 1); + cs.update_internal(0, vec![epoch_slot]); assert!(cs.lookup(0).is_none()); } @@ -232,8 +227,7 @@ mod tests { let cs = ClusterSlots::default(); let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[0], 0); - cs.update_internal(0, vec![epoch_slot], Some(0)); - assert_eq!(cs.since.load(Ordering::Relaxed), 1); + cs.update_internal(0, vec![epoch_slot]); assert!(cs.lookup(0).is_none()); } @@ -242,8 +236,7 @@ mod tests { let cs = ClusterSlots::default(); let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[1], 0); - cs.update_internal(0, vec![epoch_slot], Some(0)); - assert_eq!(cs.since.load(Ordering::Relaxed), 1); + cs.update_internal(0, vec![epoch_slot]); assert!(cs.lookup(0).is_none()); assert!(cs.lookup(1).is_some()); assert_eq!( @@ -373,7 +366,7 @@ mod tests { ); *cs.validator_stakes.write().unwrap() = map; - cs.update_internal(0, vec![epoch_slot], None); + cs.update_internal(0, vec![epoch_slot]); assert!(cs.lookup(1).is_some()); assert_eq!( cs.lookup(1) @@ -390,7 +383,7 @@ mod tests { let cs = 
ClusterSlots::default(); let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[1], 0); - cs.update_internal(0, vec![epoch_slot], None); + cs.update_internal(0, vec![epoch_slot]); let self_id = solana_sdk::pubkey::new_rand(); assert_eq!( cs.generate_repairs_for_missing_slots(&self_id, 0), @@ -404,7 +397,7 @@ mod tests { let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[1], 0); let self_id = epoch_slot.from; - cs.update_internal(0, vec![epoch_slot], None); + cs.update_internal(0, vec![epoch_slot]); let slots: Vec = cs.collect(&self_id).into_iter().collect(); assert_eq!(slots, vec![1]); } @@ -415,7 +408,7 @@ mod tests { let mut epoch_slot = EpochSlots::default(); epoch_slot.fill(&[1], 0); let self_id = epoch_slot.from; - cs.update_internal(0, vec![epoch_slot], None); + cs.update_internal(0, vec![epoch_slot]); assert!(cs .generate_repairs_for_missing_slots(&self_id, 0) .is_empty()); diff --git a/core/src/cluster_slots_service.rs b/core/src/cluster_slots_service.rs index db77bc5cdb..ce362541c6 100644 --- a/core/src/cluster_slots_service.rs +++ b/core/src/cluster_slots_service.rs @@ -185,19 +185,21 @@ impl ClusterSlotsService { #[cfg(test)] mod test { use super::*; - use crate::cluster_info::Node; + use crate::{cluster_info::Node, crds_value::CrdsValueLabel}; #[test] pub fn test_update_lowest_slot() { - let node_info = Node::new_localhost_with_pubkey(&Pubkey::default()); + let pubkey = Pubkey::new_unique(); + let node_info = Node::new_localhost_with_pubkey(&pubkey); let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info); - ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info); + ClusterSlotsService::update_lowest_slot(&pubkey, 5, &cluster_info); cluster_info.flush_push_queue(); - let lowest = cluster_info - .get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| { - lowest_slot.clone() - }) - .unwrap(); + let lowest = { + let label = CrdsValueLabel::LowestSlot(pubkey); + let gossip = 
cluster_info.gossip.read().unwrap(); + let entry = gossip.crds.get(&label).unwrap(); + entry.value.lowest_slot().unwrap().clone() + }; assert_eq!(lowest.lowest, 5); } } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index d597385ad6..dc0f4299ee 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -253,7 +253,7 @@ mod tests { bank_forks::BankForks, genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs}, }; - use solana_sdk::{pubkey::Pubkey, signature::Signer}; + use solana_sdk::{account::Account, pubkey::Pubkey, signature::Signer}; use solana_stake_program::stake_state; use solana_vote_program::{ vote_state::{self, VoteStateVersions}, @@ -315,15 +315,13 @@ mod tests { ); for a in ancestors { + let mut expected = BlockCommitment::default(); if a <= root { - let mut expected = BlockCommitment::default(); expected.increase_rooted_stake(lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); } else { - let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(1, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); } + assert_eq!(*commitment.get(&a).unwrap(), expected); } assert_eq!(rooted_stake[0], (root, lamports)); } @@ -411,16 +409,20 @@ mod tests { rooted_stake_amount, ); - genesis_config.accounts.extend(vec![ - (pk1, vote_account1.clone()), - (sk1, stake_account1), - (pk2, vote_account2.clone()), - (sk2, stake_account2), - (pk3, vote_account3.clone()), - (sk3, stake_account3), - (pk4, vote_account4.clone()), - (sk4, stake_account4), - ]); + genesis_config.accounts.extend( + vec![ + (pk1, vote_account1.clone()), + (sk1, stake_account1), + (pk2, vote_account2.clone()), + (sk2, stake_account2), + (pk3, vote_account3.clone()), + (sk3, stake_account3), + (pk4, vote_account4.clone()), + (sk4, stake_account4), + ] + .into_iter() + .map(|(key, account)| (key, Account::from(account))), + ); // Create bank let bank = 
Arc::new(Bank::new(&genesis_config)); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 5a0fec9291..931073c76d 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1,4 +1,7 @@ -use crate::progress_map::{LockoutIntervals, ProgressMap}; +use crate::{ + latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, + progress_map::{LockoutIntervals, ProgressMap}, +}; use chrono::prelude::*; use solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db}; use solana_measure::measure::Measure; @@ -37,6 +40,7 @@ pub enum SwitchForkDecision { SwitchProof(Hash), SameFork, FailedSwitchThreshold(u64, u64), + FailedSwitchDuplicateRollback(Slot), } impl SwitchForkDecision { @@ -51,6 +55,7 @@ impl SwitchForkDecision { assert_ne!(*total_stake, 0); None } + SwitchForkDecision::FailedSwitchDuplicateRollback(_) => None, SwitchForkDecision::SameFork => Some(vote_instruction::vote( vote_account_pubkey, authorized_voter_pubkey, @@ -68,7 +73,12 @@ impl SwitchForkDecision { } pub fn can_vote(&self) -> bool { - !matches!(self, SwitchForkDecision::FailedSwitchThreshold(_, _)) + match self { + SwitchForkDecision::FailedSwitchThreshold(_, _) => false, + SwitchForkDecision::FailedSwitchDuplicateRollback(_) => false, + SwitchForkDecision::SameFork => true, + SwitchForkDecision::SwitchProof(_) => true, + } } } @@ -88,7 +98,7 @@ pub(crate) struct ComputedBankState { // Tree of intervals of lockouts of the form [slot, slot + slot.lockout], // keyed by end of the range pub lockout_intervals: LockoutIntervals, - pub pubkey_votes: Arc, + pub my_latest_landed_vote: Option, } #[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")] @@ -99,6 +109,12 @@ pub struct Tower { threshold_size: f64, lockouts: VoteState, last_vote: Vote, + #[serde(skip)] + // The blockhash used in the last vote transaction, may or may not equal the + // blockhash of the voted block itself, depending if the vote slot was refreshed. 
+ // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in + // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. + last_vote_tx_blockhash: Hash, last_timestamp: BlockTimestamp, #[serde(skip)] path: PathBuf, @@ -125,6 +141,7 @@ impl Default for Tower { lockouts: VoteState::default(), last_vote: Vote::default(), last_timestamp: BlockTimestamp::default(), + last_vote_tx_blockhash: Hash::default(), path: PathBuf::default(), tmp_path: PathBuf::default(), stray_restored_slot: Option::default(), @@ -190,8 +207,9 @@ impl Tower { ); let root = root_bank.slot(); + let (best_slot, best_hash) = heaviest_subtree_fork_choice.best_overall_slot(); let heaviest_bank = bank_forks - .get(heaviest_subtree_fork_choice.best_overall_slot()) + .get_with_checked_hash((best_slot, best_hash)) .expect( "The best overall slot must be one of `frozen_banks` which all exist in bank_forks", ) @@ -207,10 +225,12 @@ impl Tower { } pub(crate) fn collect_vote_lockouts( - node_pubkey: &Pubkey, + vote_account_pubkey: &Pubkey, bank_slot: Slot, vote_accounts: F, ancestors: &HashMap>, + get_frozen_hash: impl Fn(Slot) -> Option, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, ) -> ComputedBankState where F: IntoIterator, @@ -222,12 +242,12 @@ impl Tower { // Tree of intervals of lockouts of the form [slot, slot + slot.lockout], // keyed by end of the range let mut lockout_intervals = LockoutIntervals::new(); - let mut pubkey_votes = vec![]; + let mut my_latest_landed_vote = None; for (key, (voted_stake, account)) in vote_accounts { if voted_stake == 0 { continue; } - trace!("{} {} with stake {}", node_pubkey, key, voted_stake); + trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake); let mut vote_state = match account.vote_state().as_ref() { Err(_) => { datapoint_warn!( @@ -249,7 +269,8 @@ impl Tower { .push((vote.slot, key)); } - if key == *node_pubkey || vote_state.node_pubkey == *node_pubkey { + 
if key == *vote_account_pubkey { + my_latest_landed_vote = vote_state.nth_recent_vote(0).map(|v| v.slot); debug!("vote state {:?}", vote_state); debug!( "observed slot {}", @@ -269,8 +290,13 @@ impl Tower { let start_root = vote_state.root_slot; // Add the last vote to update the `heaviest_subtree_fork_choice` - if let Some(last_voted_slot) = vote_state.last_voted_slot() { - pubkey_votes.push((key, last_voted_slot)); + if let Some(last_landed_voted_slot) = vote_state.last_voted_slot() { + latest_validator_votes_for_frozen_banks.check_add_vote( + key, + last_landed_voted_slot, + get_frozen_hash(last_landed_voted_slot), + true, + ); } vote_state.process_slot_vote_unchecked(bank_slot); @@ -333,7 +359,7 @@ impl Tower { total_stake, bank_weight, lockout_intervals, - pubkey_votes: Arc::new(pubkey_votes), + my_latest_landed_vote, } } @@ -349,13 +375,24 @@ impl Tower { .unwrap_or(false) } - fn new_vote( - local_vote_state: &VoteState, + pub fn tower_slots(&self) -> Vec { + self.lockouts.tower() + } + + pub fn last_vote_tx_blockhash(&self) -> Hash { + self.last_vote_tx_blockhash + } + + pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) { + self.last_vote_tx_blockhash = new_vote_tx_blockhash; + } + + fn apply_vote_and_generate_vote_diff( + local_vote_state: &mut VoteState, slot: Slot, hash: Hash, last_voted_slot_in_bank: Option, - ) -> (Vote, Vec /*VoteState.tower*/) { - let mut local_vote_state = local_vote_state.clone(); + ) -> Vote { let vote = Vote::new(vec![slot], hash); local_vote_state.process_vote_unchecked(&vote); let slots = if let Some(last_voted_slot_in_bank) = last_voted_slot_in_bank { @@ -374,33 +411,48 @@ impl Tower { slots, local_vote_state.votes ); - (Vote::new(slots, hash), local_vote_state.tower()) + Vote::new(slots, hash) } - fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { + pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { let (_stake, vote_account) = 
bank.get_vote_account(vote_account_pubkey)?; let slot = vote_account.vote_state().as_ref().ok()?.last_voted_slot(); slot } - pub fn new_vote_from_bank( - &self, - bank: &Bank, - vote_account_pubkey: &Pubkey, - ) -> (Vote, Vec /*VoteState.tower*/) { - let voted_slot = Self::last_voted_slot_in_bank(bank, vote_account_pubkey); - Self::new_vote(&self.lockouts, bank.slot(), bank.hash(), voted_slot) + pub fn record_bank_vote(&mut self, bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { + let last_voted_slot_in_bank = Self::last_voted_slot_in_bank(bank, vote_account_pubkey); + + // Returns the new root if one is made after applying a vote for the given bank to + // `self.lockouts` + self.record_bank_vote_and_update_lockouts(bank.slot(), bank.hash(), last_voted_slot_in_bank) } - pub fn record_bank_vote(&mut self, vote: Vote) -> Option { - let slot = vote.last_voted_slot().unwrap_or(0); - trace!("{} record_vote for {}", self.node_pubkey, slot); + fn record_bank_vote_and_update_lockouts( + &mut self, + vote_slot: Slot, + vote_hash: Hash, + last_voted_slot_in_bank: Option, + ) -> Option { + trace!("{} record_vote for {}", self.node_pubkey, vote_slot); let old_root = self.root(); - self.lockouts.process_vote_unchecked(&vote); - self.last_vote = vote; + let mut new_vote = Self::apply_vote_and_generate_vote_diff( + &mut self.lockouts, + vote_slot, + vote_hash, + last_voted_slot_in_bank, + ); + + new_vote.timestamp = self.maybe_timestamp(self.last_vote.last_voted_slot().unwrap_or(0)); + self.last_vote = new_vote; + let new_root = self.root(); - datapoint_info!("tower-vote", ("latest", slot, i64), ("root", new_root, i64)); + datapoint_info!( + "tower-vote", + ("latest", vote_slot, i64), + ("root", new_root, i64) + ); if old_root != new_root { Some(new_root) } else { @@ -410,22 +462,23 @@ impl Tower { #[cfg(test)] pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option { - let vote = Vote::new(vec![slot], hash); - self.record_bank_vote(vote) + 
self.record_bank_vote_and_update_lockouts(slot, hash, self.last_voted_slot()) } pub fn last_voted_slot(&self) -> Option { self.last_vote.last_voted_slot() } + pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> { + self.last_vote.last_voted_slot_hash() + } + pub fn stray_restored_slot(&self) -> Option { self.stray_restored_slot } - pub fn last_vote_and_timestamp(&mut self) -> Vote { - let mut last_vote = self.last_vote.clone(); - last_vote.timestamp = self.maybe_timestamp(last_vote.last_voted_slot().unwrap_or(0)); - last_vote + pub fn last_vote(&mut self) -> Vote { + self.last_vote.clone() } fn maybe_timestamp(&mut self, current_slot: Slot) -> Option { @@ -575,9 +628,13 @@ impl Tower { SwitchForkDecision::FailedSwitchThreshold(0, total_stake) }; + let rollback_due_to_to_to_duplicate_ancestor = |latest_duplicate_ancestor| { + SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) + }; + let last_vote_ancestors = ancestors.get(&last_voted_slot).unwrap_or_else(|| { - if !self.is_stray_last_vote() { + if self.is_stray_last_vote() { // Unless last vote is stray and stale, ancestors.get(last_voted_slot) must // return Some(_), justifying to panic! here. // Also, adjust_lockouts_after_replay() correctly makes last_voted_slot None, @@ -586,9 +643,9 @@ impl Tower { // In other words, except being stray, all other slots have been voted on while // this validator has been running, so we must be able to fetch ancestors for // all of them. 
- panic!("no ancestors found with slot: {}", last_voted_slot); - } else { empty_ancestors_due_to_minor_unsynced_ledger() + } else { + panic!("no ancestors found with slot: {}", last_voted_slot); } }); @@ -601,15 +658,23 @@ impl Tower { } if last_vote_ancestors.contains(&switch_slot) { - if !self.is_stray_last_vote() { + if self.is_stray_last_vote() { + return suspended_decision_due_to_major_unsynced_ledger(); + } else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) { + // We're rolling back because one of the ancestors of the last vote was a duplicate. In this + // case, it's acceptable if the switch candidate is one of ancestors of the previous vote, + // just fail the switch check because there's no point in voting on an ancestor. ReplayStage + // should then have a special case continue building an alternate fork from this ancestor, NOT + // the `last_voted_slot`. This is in contrast to usual SwitchFailure where ReplayStage continues to build blocks + // on latest vote. See `select_vote_and_reset_forks()` for more details. 
+ return rollback_due_to_to_to_duplicate_ancestor(latest_duplicate_ancestor); + } else { panic!( - "Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}", + "Should never consider switching to ancestor ({}) of last vote: {}, ancestors({:?})", switch_slot, + last_voted_slot, last_vote_ancestors, - last_voted_slot ); - } else { - return suspended_decision_due_to_major_unsynced_ledger(); } } @@ -1237,11 +1302,13 @@ pub mod test { use super::*; use crate::{ cluster_info_vote_listener::VoteTracker, + cluster_slot_state_verifier::{DuplicateSlotsTracker, GossipDuplicateConfirmedSlots}, cluster_slots::ClusterSlots, fork_choice::SelectVoteAndResetForkResult, - heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, - progress_map::ForkProgress, + heaviest_subtree_fork_choice::{HeaviestSubtreeForkChoice, SlotHashKey}, + progress_map::{DuplicateStats, ForkProgress}, replay_stage::{HeaviestForkFailures, ReplayStage}, + unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, }; use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path}; use solana_runtime::{ @@ -1253,7 +1320,11 @@ pub mod test { }, }; use solana_sdk::{ - account::Account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer, + account::{Account, AccountSharedData, WritableAccount}, + clock::Slot, + hash::Hash, + pubkey::Pubkey, + signature::Signer, slot_history::SlotHistory, }; use solana_vote_program::{ @@ -1276,6 +1347,7 @@ pub mod test { pub bank_forks: RwLock, pub progress: ProgressMap, pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice, + pub latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks, } impl VoteSimulator { @@ -1295,6 +1367,8 @@ pub mod test { bank_forks: RwLock::new(bank_forks), progress, heaviest_subtree_fork_choice, + latest_validator_votes_for_frozen_banks: + LatestValidatorVotesForFrozenBanks::default(), } } pub(crate) fn fill_bank_forks( @@ -1309,9 +1383,9 @@ pub mod test { while let 
Some(visit) = walk.get() { let slot = visit.node().data; - self.progress - .entry(slot) - .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)); + self.progress.entry(slot).or_insert_with(|| { + ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0) + }); if self.bank_forks.read().unwrap().get(slot).is_some() { walk.forward(); continue; @@ -1338,8 +1412,10 @@ pub mod test { } } new_bank.freeze(); - self.heaviest_subtree_fork_choice - .add_new_leaf_slot(new_bank.slot(), Some(new_bank.parent_slot())); + self.heaviest_subtree_fork_choice.add_new_leaf_slot( + (new_bank.slot(), new_bank.hash()), + Some((new_bank.parent_slot(), new_bank.parent_hash())), + ); self.bank_forks.write().unwrap().insert(new_bank); walk.forward(); } @@ -1374,6 +1450,7 @@ pub mod test { &ClusterSlots::default(), &self.bank_forks, &mut self.heaviest_subtree_fork_choice, + &mut self.latest_validator_votes_for_frozen_banks, ); let vote_bank = self @@ -1391,7 +1468,7 @@ pub mod test { .. 
} = ReplayStage::select_vote_and_reset_forks( &vote_bank, - &None, + None, &ancestors, &descendants, &self.progress, @@ -1403,8 +1480,9 @@ pub mod test { if !heaviest_fork_failures.is_empty() { return heaviest_fork_failures; } - let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0; - if let Some(new_root) = tower.record_bank_vote(vote) { + + let new_root = tower.record_bank_vote(&vote_bank, &my_vote_pubkey); + if let Some(new_root) = new_root { self.set_root(new_root); } @@ -1419,6 +1497,11 @@ pub mod test { &AbsRequestSender::default(), None, &mut self.heaviest_subtree_fork_choice, + &mut DuplicateSlotsTracker::default(), + &mut GossipDuplicateConfirmedSlots::default(), + &mut UnfrozenGossipVerifiedVoteHashes::default(), + &mut true, + &mut Vec::new(), ) } @@ -1453,7 +1536,9 @@ pub mod test { ) { self.progress .entry(slot) - .or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0)) + .or_insert_with(|| { + ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0) + }) .fork_stats .lockout_intervals .entry(lockout_interval.1) @@ -1560,7 +1645,14 @@ pub mod test { let mut progress = ProgressMap::default(); progress.insert( 0, - ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0), + ForkProgress::new( + bank0.last_blockhash(), + None, + DuplicateStats::default(), + None, + 0, + 0, + ), ); let bank_forks = BankForks::new(bank0); let heaviest_subtree_fork_choice = @@ -1571,10 +1663,10 @@ pub mod test { fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> { let mut stakes = vec![]; for (lamports, votes) in stake_votes { - let mut account = Account { + let mut account = AccountSharedData { data: vec![0; VoteState::size_of()], lamports: *lamports, - ..Account::default() + ..AccountSharedData::default() }; let mut vote_state = VoteState::default(); for slot in *votes { @@ -1582,7 +1674,7 @@ pub mod test { } VoteState::serialize( &VoteStateVersions::new_current(vote_state), - &mut 
account.data, + &mut account.data_as_mut_slice(), ) .expect("serialize state"); stakes.push(( @@ -1600,6 +1692,12 @@ pub mod test { assert!(decision .to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()) .is_none()); + + decision = SwitchForkDecision::FailedSwitchDuplicateRollback(0); + assert!(decision + .to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()) + .is_none()); + decision = SwitchForkDecision::SameFork; assert_eq!( decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()), @@ -1609,6 +1707,7 @@ pub mod test { vote.clone(), )) ); + decision = SwitchForkDecision::SwitchProof(Hash::default()); assert_eq!( decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()), @@ -1651,11 +1750,20 @@ pub mod test { } #[test] - fn test_switch_threshold() { + fn test_switch_threshold_duplicate_rollback() { + run_test_switch_threshold_duplicate_rollback(false); + } + + #[test] + #[should_panic] + fn test_switch_threshold_duplicate_rollback_panic() { + run_test_switch_threshold_duplicate_rollback(true); + } + + fn setup_switch_test(num_accounts: usize) -> (Arc, VoteSimulator, u64) { // Init state - let mut vote_simulator = VoteSimulator::new(2); - let my_pubkey = vote_simulator.node_pubkeys[0]; - let other_vote_account = vote_simulator.vote_pubkeys[1]; + assert!(num_accounts > 1); + let mut vote_simulator = VoteSimulator::new(num_accounts); let bank0 = vote_simulator .bank_forks .read() @@ -1686,6 +1794,82 @@ pub mod test { for (_, fork_progress) in vote_simulator.progress.iter_mut() { fork_progress.fork_stats.computed = true; } + + (bank0, vote_simulator, total_stake) + } + + fn run_test_switch_threshold_duplicate_rollback(should_panic: bool) { + let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2); + let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); + let descendants = vote_simulator + .bank_forks + .read() + .unwrap() + .descendants() + .clone(); 
+ let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]); + + // Last vote is 47 + tower.record_vote(47, Hash::default()); + + // Trying to switch to an ancestor of last vote should only not panic + // if the current vote has a duplicate ancestor + let ancestor_of_voted_slot = 43; + let duplicate_ancestor1 = 44; + let duplicate_ancestor2 = 45; + vote_simulator.progress.set_unconfirmed_duplicate_slot( + duplicate_ancestor1, + &descendants.get(&duplicate_ancestor1).unwrap(), + ); + vote_simulator.progress.set_unconfirmed_duplicate_slot( + duplicate_ancestor2, + &descendants.get(&duplicate_ancestor2).unwrap(), + ); + assert_eq!( + tower.check_switch_threshold( + ancestor_of_voted_slot, + &ancestors, + &descendants, + &vote_simulator.progress, + total_stake, + bank0.epoch_vote_accounts(0).unwrap(), + ), + SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2) + ); + let mut confirm_ancestors = vec![duplicate_ancestor1]; + if should_panic { + // Adding the last duplicate ancestor will + // 1) Cause loop below to confirm last ancestor + // 2) Check switch threshold on a vote ancestor when there + // are no duplicates on that fork, which will cause a panic + confirm_ancestors.push(duplicate_ancestor2); + } + for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() { + vote_simulator.progress.set_confirmed_duplicate_slot( + duplicate_ancestor, + ancestors.get(&duplicate_ancestor).unwrap(), + &descendants.get(&duplicate_ancestor).unwrap(), + ); + let res = tower.check_switch_threshold( + ancestor_of_voted_slot, + &ancestors, + &descendants, + &vote_simulator.progress, + total_stake, + bank0.epoch_vote_accounts(0).unwrap(), + ); + if i == 0 { + assert_eq!( + res, + SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2) + ); + } + } + } + + #[test] + fn test_switch_threshold() { + let (bank0, mut vote_simulator, total_stake) = setup_switch_test(2); let ancestors = 
vote_simulator.bank_forks.read().unwrap().ancestors(); let mut descendants = vote_simulator .bank_forks @@ -1693,7 +1877,8 @@ pub mod test { .unwrap() .descendants() .clone(); - let mut tower = Tower::new_with_key(&my_pubkey); + let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]); + let other_vote_account = vote_simulator.vote_pubkeys[1]; // Last vote is 47 tower.record_vote(47, Hash::default()); @@ -1982,24 +2167,34 @@ pub mod test { //two accounts voting for slot 0 with 1 token staked let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]); accounts.sort_by_key(|(pk, _)| *pk); - let account_latest_votes: PubkeyVotes = - accounts.iter().map(|(pubkey, _)| (*pubkey, 0)).collect(); + let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts + .iter() + .map(|(pubkey, _)| (*pubkey, (0, Hash::default()))) + .collect(); let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())] .into_iter() .collect(); + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); let ComputedBankState { voted_stakes, total_stake, bank_weight, - pubkey_votes, .. - } = Tower::collect_vote_lockouts(&Pubkey::default(), 1, accounts.into_iter(), &ancestors); + } = Tower::collect_vote_lockouts( + &Pubkey::default(), + 1, + accounts.into_iter(), + &ancestors, + |_| Some(Hash::default()), + &mut latest_validator_votes_for_frozen_banks, + ); assert_eq!(voted_stakes[&0], 2); assert_eq!(total_stake, 2); - let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap(); - pubkey_votes.sort(); - assert_eq!(pubkey_votes, account_latest_votes); + let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0); + new_votes.sort(); + assert_eq!(new_votes, account_latest_votes); // Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts, // the account will have 2 votes, with lockout 2 + 4 = 6. 
So expected weight for @@ -2012,9 +2207,14 @@ pub mod test { //two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]); accounts.sort_by_key(|(pk, _)| *pk); - let account_latest_votes: PubkeyVotes = accounts + let account_latest_votes: Vec<(Pubkey, SlotHashKey)> = accounts .iter() - .map(|(pubkey, _)| (*pubkey, (MAX_LOCKOUT_HISTORY - 1) as Slot)) + .map(|(pubkey, _)| { + ( + *pubkey, + ((MAX_LOCKOUT_HISTORY - 1) as Slot, Hash::default()), + ) + }) .collect(); let mut tower = Tower::new_for_tests(0, 0.67); let mut ancestors = HashMap::new(); @@ -2036,16 +2236,19 @@ pub mod test { + root_weight; let expected_bank_weight = 2 * vote_account_expected_weight; assert_eq!(tower.lockouts.root_slot, Some(0)); + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); let ComputedBankState { voted_stakes, bank_weight, - pubkey_votes, .. } = Tower::collect_vote_lockouts( &Pubkey::default(), MAX_LOCKOUT_HISTORY as u64, accounts.into_iter(), &ancestors, + |_| Some(Hash::default()), + &mut latest_validator_votes_for_frozen_banks, ); for i in 0..MAX_LOCKOUT_HISTORY { assert_eq!(voted_stakes[&(i as u64)], 2); @@ -2053,9 +2256,9 @@ pub mod test { // should be the sum of all the weights for root assert_eq!(bank_weight, expected_bank_weight); - let mut pubkey_votes = Arc::try_unwrap(pubkey_votes).unwrap(); - pubkey_votes.sort(); - assert_eq!(pubkey_votes, account_latest_votes); + let mut new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root.slot); + new_votes.sort(); + assert_eq!(new_votes, account_latest_votes); } #[test] @@ -2251,10 +2454,10 @@ pub mod test { #[test] fn test_stake_is_updated_for_entire_branch() { let mut voted_stakes = HashMap::new(); - let account = Account { + let account = AccountSharedData::from(Account { lamports: 1, ..Account::default() - }; + }); let set: HashSet = vec![0u64, 1u64].into_iter().collect(); let ancestors: 
HashMap> = [(2u64, set)].iter().cloned().collect(); Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports, &ancestors); @@ -2264,23 +2467,26 @@ pub mod test { } #[test] - fn test_new_vote() { - let local = VoteState::default(); - let (vote, tower_slots) = Tower::new_vote(&local, 0, Hash::default(), None); - assert_eq!(local.votes.len(), 0); + fn test_apply_vote_and_generate_vote_diff() { + let mut local = VoteState::default(); + let vote = Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), None); + assert_eq!(local.votes.len(), 1); assert_eq!(vote.slots, vec![0]); - assert_eq!(tower_slots, vec![0]); + assert_eq!(local.tower(), vec![0]); } #[test] - fn test_new_vote_dup_vote() { - let local = VoteState::default(); - let vote = Tower::new_vote(&local, 0, Hash::default(), Some(0)); - assert!(vote.0.slots.is_empty()); + fn test_apply_vote_and_generate_vote_diff_dup_vote() { + let mut local = VoteState::default(); + // If `latest_voted_slot_in_bank == Some(0)`, then we already have a vote for 0. Adding + // another vote for slot 0 should return an empty vote as the diff. 
+ let vote = + Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), Some(0)); + assert!(vote.slots.is_empty()); } #[test] - fn test_new_vote_next_vote() { + fn test_apply_vote_and_generate_vote_diff_next_vote() { let mut local = VoteState::default(); let vote = Vote { slots: vec![0], @@ -2289,13 +2495,14 @@ pub mod test { }; local.process_vote_unchecked(&vote); assert_eq!(local.votes.len(), 1); - let (vote, tower_slots) = Tower::new_vote(&local, 1, Hash::default(), Some(0)); + let vote = + Tower::apply_vote_and_generate_vote_diff(&mut local, 1, Hash::default(), Some(0)); assert_eq!(vote.slots, vec![1]); - assert_eq!(tower_slots, vec![0, 1]); + assert_eq!(local.tower(), vec![0, 1]); } #[test] - fn test_new_vote_next_after_expired_vote() { + fn test_apply_vote_and_generate_vote_diff_next_after_expired_vote() { let mut local = VoteState::default(); let vote = Vote { slots: vec![0], @@ -2304,10 +2511,14 @@ pub mod test { }; local.process_vote_unchecked(&vote); assert_eq!(local.votes.len(), 1); - let (vote, tower_slots) = Tower::new_vote(&local, 3, Hash::default(), Some(0)); + + // First vote expired, so should be evicted from tower. Thus even with + // `latest_voted_slot_in_bank == Some(0)`, the first vote slot won't be + // observable in any of the results. + let vote = + Tower::apply_vote_and_generate_vote_diff(&mut local, 3, Hash::default(), Some(0)); assert_eq!(vote.slots, vec![3]); - // First vote expired, so should be evicted from tower. 
- assert_eq!(tower_slots, vec![3]); + assert_eq!(local.tower(), vec![3]); } #[test] @@ -2352,6 +2563,8 @@ pub mod test { vote_to_evaluate, accounts.clone().into_iter(), &ancestors, + |_| None, + &mut LatestValidatorVotesForFrozenBanks::default(), ); assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,)); @@ -2368,6 +2581,8 @@ pub mod test { vote_to_evaluate, accounts.into_iter(), &ancestors, + |_| None, + &mut LatestValidatorVotesForFrozenBanks::default(), ); assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &voted_stakes, total_stake,)); } @@ -2379,10 +2594,12 @@ pub mod test { } else { vec![] }; - let expected = Vote::new(slots, Hash::default()); + let mut expected = Vote::new(slots, Hash::default()); for i in 0..num_votes { tower.record_vote(i as u64, Hash::default()); } + + expected.timestamp = tower.last_vote.timestamp; assert_eq!(expected, tower.last_vote) } diff --git a/core/src/contact_info.rs b/core/src/contact_info.rs index 59c4179937..eafaf636ba 100644 --- a/core/src/contact_info.rs +++ b/core/src/contact_info.rs @@ -6,11 +6,10 @@ use solana_sdk::sanitize::{Sanitize, SanitizeError}; #[cfg(test)] use solana_sdk::signature::{Keypair, Signer}; use solana_sdk::timing::timestamp; -use std::cmp::{Ord, Ordering, PartialEq, PartialOrd}; use std::net::{IpAddr, SocketAddr}; /// Structure representing a node on the network -#[derive(Serialize, Deserialize, Clone, Debug, AbiExample)] +#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, AbiExample, Deserialize, Serialize)] pub struct ContactInfo { pub id: Pubkey, /// gossip address @@ -48,34 +47,13 @@ impl Sanitize for ContactInfo { } } -impl Ord for ContactInfo { - fn cmp(&self, other: &Self) -> Ordering { - self.id.cmp(&other.id) - } -} - -impl PartialOrd for ContactInfo { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialEq for ContactInfo { - fn eq(&self, other: &Self) -> bool { - self.id == other.id - } -} - -impl Eq for 
ContactInfo {} - #[macro_export] macro_rules! socketaddr { ($ip:expr, $port:expr) => { std::net::SocketAddr::from((std::net::Ipv4Addr::from($ip), $port)) }; ($str:expr) => {{ - let a: std::net::SocketAddr = $str.parse().unwrap(); - a + $str.parse::().unwrap() }}; } #[macro_export] diff --git a/core/src/crds.rs b/core/src/crds.rs index 426d813587..f03848bc18 100644 --- a/core/src/crds.rs +++ b/core/src/crds.rs @@ -19,7 +19,7 @@ //! CrdsValue enums. //! //! Merge strategy is implemented in: -//! impl PartialOrd for VersionedCrdsValue +//! fn overrides(value: &CrdsValue, other: &VersionedCrdsValue) -> bool //! //! A value is updated to a new version if the labels match, and the value //! wallclock is later, or the value hash is greater. @@ -28,34 +28,36 @@ use crate::contact_info::ContactInfo; use crate::crds_shards::CrdsShards; use crate::crds_value::{CrdsData, CrdsValue, CrdsValueLabel, LowestSlot}; use bincode::serialize; -use indexmap::map::{rayon::ParValues, Entry, IndexMap, Values}; +use indexmap::map::{rayon::ParValues, Entry, IndexMap}; use indexmap::set::IndexSet; use rayon::{prelude::*, ThreadPool}; use solana_sdk::hash::{hash, Hash}; use solana_sdk::pubkey::Pubkey; -use solana_sdk::signature::Keypair; -use solana_sdk::timing::timestamp; -use std::cmp; -use std::collections::{hash_map, BTreeSet, HashMap}; -use std::ops::{Bound, Index, IndexMut}; +use std::{ + cmp::Ordering, + collections::{hash_map, BTreeMap, HashMap, VecDeque}, + ops::{Bound, Index, IndexMut}, +}; const CRDS_SHARDS_BITS: u32 = 8; -// Limit number of crds values associated with each unique pubkey. This -// excludes crds values which by label design are limited per each pubkey. -const MAX_CRDS_VALUES_PER_PUBKEY: usize = 32; #[derive(Clone)] pub struct Crds { /// Stores the map of labels and values table: IndexMap, - pub num_inserts: usize, // Only used in tests. + cursor: Cursor, // Next insert ordinal location. shards: CrdsShards, nodes: IndexSet, // Indices of nodes' ContactInfo. 
- votes: IndexSet, // Indices of Vote crds values. - // Indices of EpochSlots crds values ordered by insert timestamp. - epoch_slots: BTreeSet<(u64 /*insert timestamp*/, usize)>, + // Indices of Votes keyed by insert order. + votes: BTreeMap, + // Indices of EpochSlots keyed by insert order. + epoch_slots: BTreeMap, // Indices of all crds values associated with a node. records: HashMap>, + // Indices of all entries keyed by insert order. + entries: BTreeMap, + // Hash of recently purged values. + purged: VecDeque<(Hash, u64 /*timestamp*/)>, } #[derive(PartialEq, Debug)] @@ -65,152 +67,160 @@ pub enum CrdsError { } /// This structure stores some local metadata associated with the CrdsValue -/// The implementation of PartialOrd ensures that the "highest" version is always picked to be -/// stored in the Crds #[derive(PartialEq, Debug, Clone)] pub struct VersionedCrdsValue { + /// Ordinal index indicating insert order. + ordinal: u64, pub value: CrdsValue, - /// local time when inserted - pub insert_timestamp: u64, /// local time when updated - pub local_timestamp: u64, + pub(crate) local_timestamp: u64, /// value hash - pub value_hash: Hash, + pub(crate) value_hash: Hash, } -impl PartialOrd for VersionedCrdsValue { - fn partial_cmp(&self, other: &VersionedCrdsValue) -> Option { - if self.value.label() != other.value.label() { - None - } else if self.value.wallclock() == other.value.wallclock() { - Some(self.value_hash.cmp(&other.value_hash)) - } else { - Some(self.value.wallclock().cmp(&other.value.wallclock())) - } +#[derive(Clone, Copy, Default)] +pub struct Cursor(u64); + +impl Cursor { + fn ordinal(&self) -> u64 { + self.0 + } + + // Updates the cursor position given the ordinal index of value consumed. 
+ #[inline] + fn consume(&mut self, ordinal: u64) { + self.0 = self.0.max(ordinal + 1); } } + impl VersionedCrdsValue { - pub fn new(local_timestamp: u64, value: CrdsValue) -> Self { + fn new(value: CrdsValue, cursor: Cursor, local_timestamp: u64) -> Self { let value_hash = hash(&serialize(&value).unwrap()); VersionedCrdsValue { + ordinal: cursor.ordinal(), value, - insert_timestamp: local_timestamp, local_timestamp, value_hash, } } - - /// New random VersionedCrdsValue for tests and simulations. - pub fn new_rand(rng: &mut R, keypair: Option<&Keypair>) -> Self { - let delay = 10 * 60 * 1000; // 10 minutes - let now = timestamp() - delay + rng.gen_range(0, 2 * delay); - Self::new(now, CrdsValue::new_rand(rng, keypair)) - } } impl Default for Crds { fn default() -> Self { Crds { table: IndexMap::default(), - num_inserts: 0, + cursor: Cursor::default(), shards: CrdsShards::new(CRDS_SHARDS_BITS), nodes: IndexSet::default(), - votes: IndexSet::default(), - epoch_slots: BTreeSet::default(), + votes: BTreeMap::default(), + epoch_slots: BTreeMap::default(), records: HashMap::default(), + entries: BTreeMap::default(), + purged: VecDeque::default(), } } } -impl Crds { - /// must be called atomically with `insert_versioned` - pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue { - VersionedCrdsValue::new(local_timestamp, value) +// Returns true if the first value updates the 2nd one. +// Both values should have the same key/label. +fn overrides(value: &CrdsValue, other: &VersionedCrdsValue) -> bool { + assert_eq!(value.label(), other.value.label(), "labels mismatch!"); + // Node instances are special cased so that if there are two running + // instances of the same node, the more recent start is propagated through + // gossip regardless of wallclocks. 
+ if let CrdsData::NodeInstance(value) = &value.data { + if let Some(out) = value.overrides(&other.value) { + return out; + } } - pub fn would_insert( - &self, - value: CrdsValue, - local_timestamp: u64, - ) -> (bool, VersionedCrdsValue) { - let new_value = self.new_versioned(local_timestamp, value); - let label = new_value.value.label(); - // New value is outdated and fails to insert, if it already exists in - // the table with a more recent wallclock. - let outdated = matches!(self.table.get(&label), Some(current) if new_value <= *current); - (!outdated, new_value) + match value.wallclock().cmp(&other.value.wallclock()) { + Ordering::Less => false, + Ordering::Greater => true, + // Ties should be broken in a deterministic way across the cluster. + // For backward compatibility this is done by comparing hash of + // serialized values. + Ordering::Equal => { + let value_hash = hash(&serialize(&value).unwrap()); + other.value_hash < value_hash + } } - /// insert the new value, returns the old value if insert succeeds - pub fn insert_versioned( - &mut self, - new_value: VersionedCrdsValue, - ) -> Result, CrdsError> { - let label = new_value.value.label(); +} + +impl Crds { + /// Returns true if the given value updates an existing one in the table. + /// The value is outdated and fails to insert, if it already exists in the + /// table with a more recent wallclock. 
+ pub(crate) fn upserts(&self, value: &CrdsValue) -> bool { + match self.table.get(&value.label()) { + Some(other) => overrides(value, other), + None => true, + } + } + + pub fn insert(&mut self, value: CrdsValue, now: u64) -> Result<(), CrdsError> { + let label = value.label(); + let pubkey = value.pubkey(); + let value = VersionedCrdsValue::new(value, self.cursor, now); match self.table.entry(label) { Entry::Vacant(entry) => { let entry_index = entry.index(); - self.shards.insert(entry_index, &new_value); - match new_value.value.data { + self.shards.insert(entry_index, &value); + match value.value.data { CrdsData::ContactInfo(_) => { self.nodes.insert(entry_index); } CrdsData::Vote(_, _) => { - self.votes.insert(entry_index); + self.votes.insert(value.ordinal, entry_index); } CrdsData::EpochSlots(_, _) => { - self.epoch_slots - .insert((new_value.insert_timestamp, entry_index)); + self.epoch_slots.insert(value.ordinal, entry_index); } _ => (), }; - self.records - .entry(new_value.value.pubkey()) - .or_default() - .insert(entry_index); - entry.insert(new_value); - self.num_inserts += 1; - Ok(None) + self.entries.insert(value.ordinal, entry_index); + self.records.entry(pubkey).or_default().insert(entry_index); + self.cursor.consume(value.ordinal); + entry.insert(value); + Ok(()) } - Entry::Occupied(mut entry) if *entry.get() < new_value => { + Entry::Occupied(mut entry) if overrides(&value.value, entry.get()) => { let entry_index = entry.index(); self.shards.remove(entry_index, entry.get()); - self.shards.insert(entry_index, &new_value); - if let CrdsData::EpochSlots(_, _) = new_value.value.data { - self.epoch_slots - .remove(&(entry.get().insert_timestamp, entry_index)); - self.epoch_slots - .insert((new_value.insert_timestamp, entry_index)); + self.shards.insert(entry_index, &value); + match value.value.data { + CrdsData::Vote(_, _) => { + self.votes.remove(&entry.get().ordinal); + self.votes.insert(value.ordinal, entry_index); + } + CrdsData::EpochSlots(_, _) => 
{ + self.epoch_slots.remove(&entry.get().ordinal); + self.epoch_slots.insert(value.ordinal, entry_index); + } + _ => (), } - self.num_inserts += 1; + self.entries.remove(&entry.get().ordinal); + self.entries.insert(value.ordinal, entry_index); // As long as the pubkey does not change, self.records // does not need to be updated. - debug_assert_eq!(entry.get().value.pubkey(), new_value.value.pubkey()); - Ok(Some(entry.insert(new_value))) + debug_assert_eq!(entry.get().value.pubkey(), pubkey); + self.cursor.consume(value.ordinal); + self.purged.push_back((entry.get().value_hash, now)); + entry.insert(value); + Ok(()) } - _ => { + Entry::Occupied(entry) => { trace!( "INSERT FAILED data: {} new.wallclock: {}", - new_value.value.label(), - new_value.value.wallclock(), + value.value.label(), + value.value.wallclock(), ); + if entry.get().value_hash != value.value_hash { + self.purged.push_back((value.value_hash, now)); + } Err(CrdsError::InsertFailed) } } } - pub fn insert( - &mut self, - value: CrdsValue, - local_timestamp: u64, - ) -> Result, CrdsError> { - let new_value = self.new_versioned(local_timestamp, value); - self.insert_versioned(new_value) - } - pub fn lookup(&self, label: &CrdsValueLabel) -> Option<&CrdsValue> { - self.table.get(label).map(|x| &x.value) - } - - pub fn lookup_versioned(&self, label: &CrdsValueLabel) -> Option<&VersionedCrdsValue> { - self.table.get(label) - } pub fn get(&self, label: &CrdsValueLabel) -> Option<&VersionedCrdsValue> { self.table.get(label) @@ -239,20 +249,42 @@ impl Crds { }) } - /// Returns all entries which are Vote. - pub(crate) fn get_votes(&self) -> impl Iterator { - self.votes.iter().map(move |i| self.table.index(*i)) + /// Returns all vote entries inserted since the given cursor. + /// Updates the cursor as the votes are consumed. 
+ pub(crate) fn get_votes<'a>( + &'a self, + cursor: &'a mut Cursor, + ) -> impl Iterator { + let range = (Bound::Included(cursor.ordinal()), Bound::Unbounded); + self.votes.range(range).map(move |(ordinal, index)| { + cursor.consume(*ordinal); + self.table.index(*index) + }) } - /// Returns epoch-slots inserted since (or at) the given timestamp. - pub(crate) fn get_epoch_slots_since( - &self, - timestamp: u64, - ) -> impl Iterator { - let range = (Bound::Included((timestamp, 0)), Bound::Unbounded); - self.epoch_slots - .range(range) - .map(move |(_, i)| self.table.index(*i)) + /// Returns epoch-slots inserted since the given cursor. + /// Updates the cursor as the values are consumed. + pub(crate) fn get_epoch_slots<'a>( + &'a self, + cursor: &'a mut Cursor, + ) -> impl Iterator { + let range = (Bound::Included(cursor.ordinal()), Bound::Unbounded); + self.epoch_slots.range(range).map(move |(ordinal, index)| { + cursor.consume(*ordinal); + self.table.index(*index) + }) + } + + /// Returns all entries inserted since the given cursor. + pub(crate) fn get_entries<'a>( + &'a self, + cursor: &'a mut Cursor, + ) -> impl Iterator { + let range = (Bound::Included(cursor.ordinal()), Bound::Unbounded); + self.entries.range(range).map(move |(ordinal, index)| { + cursor.consume(*ordinal); + self.table.index(*index) + }) } /// Returns all records associated with a pubkey. @@ -277,7 +309,8 @@ impl Crds { self.table.is_empty() } - pub fn values(&self) -> Values<'_, CrdsValueLabel, VersionedCrdsValue> { + #[cfg(test)] + pub(crate) fn values(&self) -> impl Iterator { self.table.values() } @@ -285,6 +318,24 @@ impl Crds { self.table.par_values() } + pub(crate) fn num_purged(&self) -> usize { + self.purged.len() + } + + pub(crate) fn purged(&self) -> impl IndexedParallelIterator + '_ { + self.purged.par_iter().map(|(hash, _)| *hash) + } + + /// Drops purged value hashes with timestamp less than the given one. 
+ pub(crate) fn trim_purged(&mut self, timestamp: u64) { + let count = self + .purged + .iter() + .take_while(|(_, ts)| *ts < timestamp) + .count(); + self.purged.drain(..count); + } + /// Returns all crds values which the first 'mask_bits' /// of their hash value is equal to 'mask'. pub fn filter_bitmask( @@ -299,7 +350,15 @@ impl Crds { /// Update the timestamp's of all the labels that are associated with Pubkey pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) { - if let Some(indices) = self.records.get(pubkey) { + // It suffices to only overwrite the origin's timestamp since that is + // used when purging old values. If the origin does not exist in the + // table, fallback to exhaustive update on all associated records. + let origin = CrdsValueLabel::ContactInfo(*pubkey); + if let Some(origin) = self.table.get_mut(&origin) { + if origin.local_timestamp < now { + origin.local_timestamp = now; + } + } else if let Some(indices) = self.records.get(pubkey) { for index in indices { let entry = self.table.index_mut(*index); if entry.local_timestamp < now { @@ -317,54 +376,35 @@ impl Crds { now: u64, timeouts: &HashMap, ) -> Vec { - #[rustversion::before(1.49.0)] - fn select_nth(xs: &mut Vec, _nth: usize) { - xs.sort_unstable(); - } - #[rustversion::since(1.49.0)] - fn select_nth(xs: &mut Vec, nth: usize) { - xs.select_nth_unstable(nth); - } let default_timeout = *timeouts .get(&Pubkey::default()) .expect("must have default timeout"); // Given an index of all crd values associated with a pubkey, // returns crds labels of old values to be evicted. let evict = |pubkey, index: &IndexSet| { - let timeout = *timeouts.get(pubkey).unwrap_or(&default_timeout); - let mut old_labels = Vec::new(); - // Buffer of crds values to be evicted based on their wallclock. 
- let mut recent_unlimited_labels: Vec<(u64 /*wallclock*/, usize /*index*/)> = index + let timeout = timeouts.get(pubkey).copied().unwrap_or(default_timeout); + let local_timestamp = { + let origin = CrdsValueLabel::ContactInfo(*pubkey); + match self.table.get(&origin) { + Some(origin) => origin.local_timestamp, + None => 0, + } + }; + index .into_iter() .filter_map(|ix| { let (label, value) = self.table.get_index(*ix).unwrap(); - if value.local_timestamp.saturating_add(timeout) <= now { - old_labels.push(label.clone()); - None + let expiry_timestamp = value + .local_timestamp + .max(local_timestamp) + .saturating_add(timeout); + if expiry_timestamp <= now { + Some(label.clone()) } else { - match label.value_space() { - Some(_) => None, - None => Some((value.value.wallclock(), *ix)), - } + None } }) - .collect(); - // Number of values to discard from the buffer: - let nth = recent_unlimited_labels - .len() - .saturating_sub(MAX_CRDS_VALUES_PER_PUBKEY); - // Partition on wallclock to discard the older ones. 
- if nth > 0 && nth < recent_unlimited_labels.len() { - select_nth(&mut recent_unlimited_labels, nth); - } - old_labels.extend( - recent_unlimited_labels - .split_at(nth) - .0 - .iter() - .map(|(_ /*wallclock*/, ix)| self.table.get_index(*ix).unwrap().0.clone()), - ); - old_labels + .collect::>() }; thread_pool.install(|| { self.records @@ -374,21 +414,26 @@ impl Crds { }) } - pub fn remove(&mut self, key: &CrdsValueLabel) -> Option { - let (index, _ /*label*/, value) = self.table.swap_remove_full(key)?; + pub fn remove(&mut self, key: &CrdsValueLabel, now: u64) { + let (index, _ /*label*/, value) = match self.table.swap_remove_full(key) { + Some(entry) => entry, + None => return, + }; + self.purged.push_back((value.value_hash, now)); self.shards.remove(index, &value); match value.value.data { CrdsData::ContactInfo(_) => { self.nodes.swap_remove(&index); } CrdsData::Vote(_, _) => { - self.votes.swap_remove(&index); + self.votes.remove(&value.ordinal); } CrdsData::EpochSlots(_, _) => { - self.epoch_slots.remove(&(value.insert_timestamp, index)); + self.epoch_slots.remove(&value.ordinal); } _ => (), } + self.entries.remove(&value.ordinal); // Remove the index from records associated with the value's pubkey. 
let pubkey = value.value.pubkey(); let mut records_entry = match self.records.entry(pubkey) { @@ -415,21 +460,19 @@ impl Crds { self.nodes.insert(index); } CrdsData::Vote(_, _) => { - self.votes.swap_remove(&size); - self.votes.insert(index); + self.votes.insert(value.ordinal, index); } CrdsData::EpochSlots(_, _) => { - self.epoch_slots.remove(&(value.insert_timestamp, size)); - self.epoch_slots.insert((value.insert_timestamp, index)); + self.epoch_slots.insert(value.ordinal, index); } _ => (), }; + self.entries.insert(value.ordinal, index); let pubkey = value.value.pubkey(); let records = self.records.get_mut(&pubkey).unwrap(); records.swap_remove(&size); records.insert(index); } - Some(value) } /// Returns true if the number of unique pubkeys in the table exceeds the @@ -450,12 +493,13 @@ impl Crds { // e.g. trusted validators, self pubkey, ... keep: &[Pubkey], stakes: &HashMap, - ) -> Result, CrdsError> { + now: u64, + ) -> Result { if self.should_trim(cap) { let size = self.records.len().saturating_sub(cap); - self.drop(size, keep, stakes) + self.drop(size, keep, stakes, now) } else { - Ok(Vec::default()) + Ok(0) } } @@ -465,7 +509,8 @@ impl Crds { size: usize, keep: &[Pubkey], stakes: &HashMap, - ) -> Result, CrdsError> { + now: u64, + ) -> Result { if stakes.is_empty() { return Err(CrdsError::UnknownStakes); } @@ -485,24 +530,31 @@ impl Crds { .flat_map(|k| &self.records[&k]) .map(|k| self.table.get_index(*k).unwrap().0.clone()) .collect(); - Ok(keys.iter().map(|k| self.remove(k).unwrap()).collect()) + for key in &keys { + self.remove(key, now); + } + Ok(keys.len()) } } #[cfg(test)] mod test { use super::*; - use crate::{contact_info::ContactInfo, crds_value::NodeInstance}; - use rand::{thread_rng, Rng}; + use crate::{ + contact_info::ContactInfo, + crds_value::{new_rand_timestamp, NodeInstance}, + }; + use rand::{thread_rng, Rng, SeedableRng}; + use rand_chacha::ChaChaRng; use rayon::ThreadPoolBuilder; - use solana_sdk::signature::Signer; + use 
solana_sdk::signature::{Keypair, Signer}; use std::{collections::HashSet, iter::repeat_with}; #[test] fn test_insert() { let mut crds = Crds::default(); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); - assert_eq!(crds.insert(val.clone(), 0).ok(), Some(None)); + assert_eq!(crds.insert(val.clone(), 0), Ok(())); assert_eq!(crds.table.len(), 1); assert!(crds.table.contains_key(&val.label())); assert_eq!(crds.table[&val.label()].local_timestamp, 0); @@ -511,8 +563,9 @@ mod test { fn test_update_old() { let mut crds = Crds::default(); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); - assert_eq!(crds.insert(val.clone(), 0), Ok(None)); + assert_eq!(crds.insert(val.clone(), 0), Ok(())); assert_eq!(crds.insert(val.clone(), 1), Err(CrdsError::InsertFailed)); + assert!(crds.purged.is_empty()); assert_eq!(crds.table[&val.label()].local_timestamp, 0); } #[test] @@ -522,15 +575,14 @@ mod test { &Pubkey::default(), 0, ))); - assert_matches!(crds.insert(original.clone(), 0), Ok(_)); + let value_hash = hash(&serialize(&original).unwrap()); + assert_matches!(crds.insert(original, 0), Ok(())); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &Pubkey::default(), 1, ))); - assert_eq!( - crds.insert(val.clone(), 1).unwrap().unwrap().value, - original - ); + assert_eq!(crds.insert(val.clone(), 1), Ok(())); + assert_eq!(*crds.purged.back().unwrap(), (value_hash, 1)); assert_eq!(crds.table[&val.label()].local_timestamp, 1); } #[test] @@ -540,37 +592,94 @@ mod test { &Pubkey::default(), 0, ))); - assert_eq!(crds.insert(val.clone(), 0), Ok(None)); - - assert_eq!(crds.table[&val.label()].insert_timestamp, 0); + assert_eq!(crds.insert(val.clone(), 0), Ok(())); + assert_eq!(crds.table[&val.label()].ordinal, 0); let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); + let value_hash = hash(&serialize(&val2).unwrap()); assert_eq!(val2.label().pubkey(), 
val.label().pubkey()); - assert_matches!(crds.insert(val2.clone(), 0), Ok(Some(_))); + assert_eq!(crds.insert(val2.clone(), 0), Ok(())); crds.update_record_timestamp(&val.label().pubkey(), 2); assert_eq!(crds.table[&val.label()].local_timestamp, 2); - assert_eq!(crds.table[&val.label()].insert_timestamp, 0); + assert_eq!(crds.table[&val.label()].ordinal, 1); assert_eq!(crds.table[&val2.label()].local_timestamp, 2); - assert_eq!(crds.table[&val2.label()].insert_timestamp, 0); + assert_eq!(crds.table[&val2.label()].ordinal, 1); crds.update_record_timestamp(&val.label().pubkey(), 1); assert_eq!(crds.table[&val.label()].local_timestamp, 2); - assert_eq!(crds.table[&val.label()].insert_timestamp, 0); + assert_eq!(crds.table[&val.label()].ordinal, 1); let mut ci = ContactInfo::default(); ci.wallclock += 1; let val3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); - assert_matches!(crds.insert(val3, 3), Ok(Some(_))); + assert_eq!(crds.insert(val3, 3), Ok(())); + assert_eq!(*crds.purged.back().unwrap(), (value_hash, 3)); assert_eq!(crds.table[&val2.label()].local_timestamp, 3); - assert_eq!(crds.table[&val2.label()].insert_timestamp, 3); + assert_eq!(crds.table[&val2.label()].ordinal, 2); + } + + #[test] + fn test_upsert_node_instance() { + const SEED: [u8; 32] = [0x42; 32]; + let mut rng = ChaChaRng::from_seed(SEED); + fn make_crds_value(node: NodeInstance) -> CrdsValue { + CrdsValue::new_unsigned(CrdsData::NodeInstance(node)) + } + let now = 1_620_838_767_000; + let mut crds = Crds::default(); + let pubkey = Pubkey::new_unique(); + let node = NodeInstance::new(&mut rng, pubkey, now); + let node = make_crds_value(node); + assert_eq!(crds.insert(node, now), Ok(())); + // A node-instance with a different key should insert fine even with + // older timestamps. 
+ let other = NodeInstance::new(&mut rng, Pubkey::new_unique(), now - 1); + let other = make_crds_value(other); + assert_eq!(crds.insert(other, now), Ok(())); + // A node-instance with older timestamp should fail to insert, even if + // the wallclock is more recent. + let other = NodeInstance::new(&mut rng, pubkey, now - 1); + let other = other.with_wallclock(now + 1); + let other = make_crds_value(other); + let value_hash = hash(&serialize(&other).unwrap()); + assert_eq!(crds.insert(other, now), Err(CrdsError::InsertFailed)); + assert_eq!(*crds.purged.back().unwrap(), (value_hash, now)); + // A node instance with the same timestamp should insert only if the + // random token is larger. + let mut num_overrides = 0; + for _ in 0..100 { + let other = NodeInstance::new(&mut rng, pubkey, now); + let other = make_crds_value(other); + let value_hash = hash(&serialize(&other).unwrap()); + match crds.insert(other, now) { + Ok(()) => num_overrides += 1, + Err(CrdsError::InsertFailed) => { + assert_eq!(*crds.purged.back().unwrap(), (value_hash, now)) + } + _ => panic!(), + } + } + assert_eq!(num_overrides, 5); + // A node instance with larger timestamp should insert regardless of + // its token value. 
+ for k in 1..10 { + let other = NodeInstance::new(&mut rng, pubkey, now + k); + let other = other.with_wallclock(now - 1); + let other = make_crds_value(other); + match crds.insert(other, now) { + Ok(()) => (), + _ => panic!(), + } + } } + #[test] fn test_find_old_records_default() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let mut crds = Crds::default(); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); - assert_eq!(crds.insert(val.clone(), 1), Ok(None)); + assert_eq!(crds.insert(val.clone(), 1), Ok(())); let mut set = HashMap::new(); set.insert(Pubkey::default(), 0); assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty()); @@ -593,7 +702,7 @@ mod test { let mut timeouts = HashMap::new(); let val = CrdsValue::new_rand(&mut rng, None); timeouts.insert(Pubkey::default(), 3); - assert_eq!(crds.insert(val.clone(), 0), Ok(None)); + assert_eq!(crds.insert(val.clone(), 0), Ok(())); assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty()); timeouts.insert(val.pubkey(), 1); assert_eq!( @@ -611,40 +720,6 @@ mod test { ); } - #[test] - fn test_find_old_records_unlimited() { - let thread_pool = ThreadPoolBuilder::new().build().unwrap(); - let mut rng = thread_rng(); - let now = 1_610_034_423_000; - let pubkey = Pubkey::new_unique(); - let mut crds = Crds::default(); - let mut timeouts = HashMap::new(); - timeouts.insert(Pubkey::default(), 1); - timeouts.insert(pubkey, 180); - for _ in 0..1024 { - let wallclock = now - rng.gen_range(0, 240); - let val = NodeInstance::new(&mut rng, pubkey, wallclock); - let val = CrdsData::NodeInstance(val); - let val = CrdsValue::new_unsigned(val); - assert_eq!(crds.insert(val, now), Ok(None)); - } - let now = now + 1; - let labels = crds.find_old_labels(&thread_pool, now, &timeouts); - assert_eq!(crds.table.len() - labels.len(), MAX_CRDS_VALUES_PER_PUBKEY); - let max_wallclock = labels - .iter() - .map(|label| crds.lookup(label).unwrap().wallclock()) - .max() - 
.unwrap(); - assert!(max_wallclock > now - 180); - let labels: HashSet<_> = labels.into_iter().collect(); - for (label, value) in crds.table.iter() { - if !labels.contains(label) { - assert!(max_wallclock <= value.value.wallclock()); - } - } - } - #[test] fn test_remove_default() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); @@ -657,7 +732,7 @@ mod test { crds.find_old_labels(&thread_pool, 2, &set), vec![val.label()] ); - crds.remove(&val.label()); + crds.remove(&val.label(), /*now=*/ 0); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty()); } #[test] @@ -665,7 +740,7 @@ mod test { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let mut crds = Crds::default(); let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); - assert_eq!(crds.insert(val.clone(), 1), Ok(None)); + assert_eq!(crds.insert(val.clone(), 1), Ok(())); let mut set = HashMap::new(); //now < timestamp set.insert(Pubkey::default(), 0); @@ -703,26 +778,19 @@ mod test { let keypairs: Vec<_> = std::iter::repeat_with(Keypair::new).take(256).collect(); let mut rng = thread_rng(); let mut num_inserts = 0; - let mut num_overrides = 0; for _ in 0..4096 { let keypair = &keypairs[rng.gen_range(0, keypairs.len())]; - let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair)); - match crds.insert_versioned(value) { - Ok(None) => { - num_inserts += 1; - check_crds_shards(&crds); - } - Ok(Some(_)) => { - num_inserts += 1; - num_overrides += 1; - check_crds_shards(&crds); - } - Err(_) => (), + let value = CrdsValue::new_rand(&mut rng, Some(keypair)); + let local_timestamp = new_rand_timestamp(&mut rng); + if let Ok(()) = crds.insert(value, local_timestamp) { + num_inserts += 1; + check_crds_shards(&crds); } } - assert_eq!(num_inserts, crds.num_inserts); + assert_eq!(num_inserts, crds.cursor.0 as usize); assert!(num_inserts > 700); - assert!(num_overrides > 500); + assert!(crds.num_purged() > 500); + assert_eq!(crds.num_purged() + 
crds.table.len(), 4096); assert!(crds.table.len() > 200); assert!(num_inserts > crds.table.len()); check_crds_shards(&crds); @@ -730,94 +798,156 @@ mod test { while !crds.table.is_empty() { let index = rng.gen_range(0, crds.table.len()); let key = crds.table.get_index(index).unwrap().0.clone(); - crds.remove(&key); + crds.remove(&key, /*now=*/ 0); check_crds_shards(&crds); } } - #[test] - fn test_crds_value_indices() { - fn check_crds_value_indices( - rng: &mut R, - crds: &Crds, - ) -> (usize, usize, usize) { - if !crds.table.is_empty() { - let since = crds.table[rng.gen_range(0, crds.table.len())].insert_timestamp; - let num_epoch_slots = crds - .table - .values() - .filter(|value| { - value.insert_timestamp >= since - && matches!(value.value.data, CrdsData::EpochSlots(_, _)) - }) - .count(); - assert_eq!(num_epoch_slots, crds.get_epoch_slots_since(since).count()); - for value in crds.get_epoch_slots_since(since) { - assert!(value.insert_timestamp >= since); - match value.value.data { - CrdsData::EpochSlots(_, _) => (), - _ => panic!("not an epoch-slot!"), - } - } + fn check_crds_value_indices( + rng: &mut R, + crds: &Crds, + ) -> ( + usize, // number of nodes + usize, // number of votes + usize, // number of epoch slots + ) { + let size = crds.table.len(); + let since = if size == 0 || rng.gen() { + rng.gen_range(0, crds.cursor.0 + 1) + } else { + crds.table[rng.gen_range(0, size)].ordinal + }; + let num_epoch_slots = crds + .table + .values() + .filter(|v| v.ordinal >= since) + .filter(|v| matches!(v.value.data, CrdsData::EpochSlots(_, _))) + .count(); + let mut cursor = Cursor(since); + assert_eq!(num_epoch_slots, crds.get_epoch_slots(&mut cursor).count()); + assert_eq!( + cursor.0, + crds.epoch_slots + .iter() + .last() + .map(|(k, _)| k + 1) + .unwrap_or_default() + .max(since) + ); + for value in crds.get_epoch_slots(&mut Cursor(since)) { + assert!(value.ordinal >= since); + match value.value.data { + CrdsData::EpochSlots(_, _) => (), + _ => panic!("not an 
epoch-slot!"), } - let num_nodes = crds - .table - .values() - .filter(|value| matches!(value.value.data, CrdsData::ContactInfo(_))) - .count(); - let num_votes = crds - .table - .values() - .filter(|value| matches!(value.value.data, CrdsData::Vote(_, _))) - .count(); - let num_epoch_slots = crds - .table + } + let num_votes = crds + .table + .values() + .filter(|v| v.ordinal >= since) + .filter(|v| matches!(v.value.data, CrdsData::Vote(_, _))) + .count(); + let mut cursor = Cursor(since); + assert_eq!(num_votes, crds.get_votes(&mut cursor).count()); + assert_eq!( + cursor.0, + crds.table .values() - .filter(|value| matches!(value.value.data, CrdsData::EpochSlots(_, _))) - .count(); - assert_eq!(num_nodes, crds.get_nodes_contact_info().count()); - assert_eq!(num_votes, crds.get_votes().count()); - assert_eq!(num_epoch_slots, crds.get_epoch_slots_since(0).count()); - for vote in crds.get_votes() { - match vote.value.data { - CrdsData::Vote(_, _) => (), - _ => panic!("not a vote!"), - } + .filter(|v| matches!(v.value.data, CrdsData::Vote(_, _))) + .map(|v| v.ordinal) + .max() + .map(|k| k + 1) + .unwrap_or_default() + .max(since) + ); + for value in crds.get_votes(&mut Cursor(since)) { + assert!(value.ordinal >= since); + match value.value.data { + CrdsData::Vote(_, _) => (), + _ => panic!("not a vote!"), } - for epoch_slots in crds.get_epoch_slots_since(0) { - match epoch_slots.value.data { - CrdsData::EpochSlots(_, _) => (), - _ => panic!("not an epoch-slot!"), - } + } + let num_entries = crds + .table + .values() + .filter(|value| value.ordinal >= since) + .count(); + let mut cursor = Cursor(since); + assert_eq!(num_entries, crds.get_entries(&mut cursor).count()); + assert_eq!( + cursor.0, + crds.entries + .iter() + .last() + .map(|(k, _)| k + 1) + .unwrap_or_default() + .max(since) + ); + for value in crds.get_entries(&mut Cursor(since)) { + assert!(value.ordinal >= since); + } + let num_nodes = crds + .table + .values() + .filter(|v| matches!(v.value.data, 
CrdsData::ContactInfo(_))) + .count(); + let num_votes = crds + .table + .values() + .filter(|v| matches!(v.value.data, CrdsData::Vote(_, _))) + .count(); + let num_epoch_slots = crds + .table + .values() + .filter(|v| matches!(v.value.data, CrdsData::EpochSlots(_, _))) + .count(); + assert_eq!( + crds.table.len(), + crds.get_entries(&mut Cursor::default()).count() + ); + assert_eq!(num_nodes, crds.get_nodes_contact_info().count()); + assert_eq!(num_votes, crds.get_votes(&mut Cursor::default()).count()); + assert_eq!( + num_epoch_slots, + crds.get_epoch_slots(&mut Cursor::default()).count() + ); + for vote in crds.get_votes(&mut Cursor::default()) { + match vote.value.data { + CrdsData::Vote(_, _) => (), + _ => panic!("not a vote!"), + } + } + for epoch_slots in crds.get_epoch_slots(&mut Cursor::default()) { + match epoch_slots.value.data { + CrdsData::EpochSlots(_, _) => (), + _ => panic!("not an epoch-slot!"), } - (num_nodes, num_votes, num_epoch_slots) } + (num_nodes, num_votes, num_epoch_slots) + } + + #[test] + fn test_crds_value_indices() { let mut rng = thread_rng(); let keypairs: Vec<_> = repeat_with(Keypair::new).take(128).collect(); let mut crds = Crds::default(); let mut num_inserts = 0; - let mut num_overrides = 0; for k in 0..4096 { let keypair = &keypairs[rng.gen_range(0, keypairs.len())]; - let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair)); - match crds.insert_versioned(value) { - Ok(None) => { - num_inserts += 1; - } - Ok(Some(_)) => { - num_inserts += 1; - num_overrides += 1; - } - Err(_) => (), + let value = CrdsValue::new_rand(&mut rng, Some(keypair)); + let local_timestamp = new_rand_timestamp(&mut rng); + if let Ok(()) = crds.insert(value, local_timestamp) { + num_inserts += 1; } - if k % 64 == 0 { + if k % 16 == 0 { check_crds_value_indices(&mut rng, &crds); } } - assert_eq!(num_inserts, crds.num_inserts); + assert_eq!(num_inserts, crds.cursor.0 as usize); assert!(num_inserts > 700); - assert!(num_overrides > 500); + 
assert!(crds.num_purged() > 500); assert!(crds.table.len() > 200); + assert_eq!(crds.num_purged() + crds.table.len(), 4096); assert!(num_inserts > crds.table.len()); let (num_nodes, num_votes, num_epoch_slots) = check_crds_value_indices(&mut rng, &crds); assert!(num_nodes * 3 < crds.table.len()); @@ -832,8 +962,8 @@ mod test { while !crds.table.is_empty() { let index = rng.gen_range(0, crds.table.len()); let key = crds.table.get_index(index).unwrap().0.clone(); - crds.remove(&key); - if crds.table.len() % 64 == 0 { + crds.remove(&key, /*now=*/ 0); + if crds.table.len() % 16 == 0 { check_crds_value_indices(&mut rng, &crds); } } @@ -858,8 +988,9 @@ mod test { let mut crds = Crds::default(); for k in 0..4096 { let keypair = &keypairs[rng.gen_range(0, keypairs.len())]; - let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair)); - let _ = crds.insert_versioned(value); + let value = CrdsValue::new_rand(&mut rng, Some(keypair)); + let local_timestamp = new_rand_timestamp(&mut rng); + let _ = crds.insert(value, local_timestamp); if k % 64 == 0 { check_crds_records(&crds); } @@ -870,7 +1001,7 @@ mod test { while !crds.table.is_empty() { let index = rng.gen_range(0, crds.table.len()); let key = crds.table.get_index(index).unwrap().0.clone(); - crds.remove(&key); + crds.remove(&key, /*now=*/ 0); if crds.table.len() % 64 == 0 { check_crds_records(&crds); } @@ -879,6 +1010,7 @@ mod test { } #[test] + #[allow(clippy::needless_collect)] fn test_drop() { fn num_unique_pubkeys<'a, I>(values: I) -> usize where @@ -899,14 +1031,23 @@ mod test { let mut crds = Crds::default(); for _ in 0..2048 { let keypair = &keypairs[rng.gen_range(0, keypairs.len())]; - let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair)); - let _ = crds.insert_versioned(value); + let value = CrdsValue::new_rand(&mut rng, Some(keypair)); + let local_timestamp = new_rand_timestamp(&mut rng); + let _ = crds.insert(value, local_timestamp); } let num_values = crds.table.len(); let num_pubkeys = 
num_unique_pubkeys(crds.table.values()); assert!(!crds.should_trim(num_pubkeys)); assert!(crds.should_trim(num_pubkeys * 5 / 6)); - let purged = crds.drop(16, &[], &stakes).unwrap(); + let values: Vec<_> = crds.table.values().cloned().collect(); + crds.drop(16, &[], &stakes, /*now=*/ 0).unwrap(); + let purged: Vec<_> = { + let purged: HashSet<_> = crds.purged.iter().map(|(hash, _)| hash).copied().collect(); + values + .into_iter() + .filter(|v| purged.contains(&v.value_hash)) + .collect() + }; assert_eq!(purged.len() + crds.table.len(), num_values); assert_eq!(num_unique_pubkeys(&purged), 16); assert_eq!(num_unique_pubkeys(crds.table.values()), num_pubkeys - 16); @@ -943,7 +1084,7 @@ mod test { crds.find_old_labels(&thread_pool, 2, &set), vec![val.label()] ); - crds.remove(&val.label()); + crds.remove(&val.label(), /*now=*/ 0); assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty()); } @@ -951,97 +1092,94 @@ mod test { #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_equal() { let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); - let v1 = VersionedCrdsValue::new(1, val.clone()); - let v2 = VersionedCrdsValue::new(1, val); + let v1 = VersionedCrdsValue::new(val.clone(), Cursor::default(), 1); + let v2 = VersionedCrdsValue::new(val, Cursor::default(), 1); assert_eq!(v1, v2); assert!(!(v1 != v2)); - assert_eq!(v1.partial_cmp(&v2), Some(cmp::Ordering::Equal)); - assert_eq!(v2.partial_cmp(&v1), Some(cmp::Ordering::Equal)); + assert!(!overrides(&v1.value, &v2)); + assert!(!overrides(&v2.value, &v1)); } #[test] #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_hash_order() { let v1 = VersionedCrdsValue::new( - 1, CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &Pubkey::default(), 0, ))), + Cursor::default(), + 1, // local_timestamp + ); + let v2 = VersionedCrdsValue::new( + { + let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0); + contact_info.rpc = socketaddr!("0.0.0.0:0"); + 
CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info)) + }, + Cursor::default(), + 1, // local_timestamp ); - let v2 = VersionedCrdsValue::new(1, { - let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0); - contact_info.rpc = socketaddr!("0.0.0.0:0"); - CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info)) - }); assert_eq!(v1.value.label(), v2.value.label()); assert_eq!(v1.value.wallclock(), v2.value.wallclock()); assert_ne!(v1.value_hash, v2.value_hash); assert!(v1 != v2); assert!(!(v1 == v2)); - if v1 > v2 { - assert!(v1 > v2); - assert!(v2 < v1); - assert_eq!(v1.partial_cmp(&v2), Some(cmp::Ordering::Greater)); - assert_eq!(v2.partial_cmp(&v1), Some(cmp::Ordering::Less)); - } else if v2 > v1 { - assert!(v1 < v2); - assert!(v2 > v1); - assert_eq!(v1.partial_cmp(&v2), Some(cmp::Ordering::Less)); - assert_eq!(v2.partial_cmp(&v1), Some(cmp::Ordering::Greater)); + if v1.value_hash > v2.value_hash { + assert!(overrides(&v1.value, &v2)); + assert!(!overrides(&v2.value, &v1)); } else { - panic!("bad PartialOrd implementation?"); + assert!(overrides(&v2.value, &v1)); + assert!(!overrides(&v1.value, &v2)); } } #[test] #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_wallclock_order() { let v1 = VersionedCrdsValue::new( - 1, CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &Pubkey::default(), 1, ))), + Cursor::default(), + 1, // local_timestamp ); let v2 = VersionedCrdsValue::new( - 1, CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &Pubkey::default(), 0, ))), + Cursor::default(), + 1, // local_timestamp ); assert_eq!(v1.value.label(), v2.value.label()); - assert!(v1 > v2); - assert!(!(v1 < v2)); + assert!(overrides(&v1.value, &v2)); + assert!(!overrides(&v2.value, &v1)); assert!(v1 != v2); assert!(!(v1 == v2)); - assert_eq!(v1.partial_cmp(&v2), Some(cmp::Ordering::Greater)); - assert_eq!(v2.partial_cmp(&v1), Some(cmp::Ordering::Less)); } #[test] + #[should_panic(expected = "labels 
mismatch!")] #[allow(clippy::neg_cmp_op_on_partial_ord)] fn test_label_order() { let v1 = VersionedCrdsValue::new( - 1, CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &solana_sdk::pubkey::new_rand(), 0, ))), + Cursor::default(), + 1, // local_timestamp ); let v2 = VersionedCrdsValue::new( - 1, CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &solana_sdk::pubkey::new_rand(), 0, ))), + Cursor::default(), + 1, // local_timestamp ); assert_ne!(v1, v2); assert!(!(v1 == v2)); - assert!(!(v1 < v2)); - assert!(!(v1 > v2)); - assert!(!(v2 < v1)); - assert!(!(v2 > v1)); - assert_eq!(v1.partial_cmp(&v2), None); - assert_eq!(v2.partial_cmp(&v1), None); + assert!(!overrides(&v2.value, &v1)); } } diff --git a/core/src/crds_gossip.rs b/core/src/crds_gossip.rs index 00d9b9230a..5a2d08a644 100644 --- a/core/src/crds_gossip.rs +++ b/core/src/crds_gossip.rs @@ -4,13 +4,17 @@ //! packet::PACKET_DATA_SIZE size. use crate::{ - crds::{Crds, VersionedCrdsValue}, + cluster_info::Ping, + contact_info::ContactInfo, + crds::Crds, crds_gossip_error::CrdsGossipError, crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats}, crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE}, - crds_value::{CrdsData, CrdsValue, CrdsValueLabel}, + crds_value::{CrdsData, CrdsValue}, duplicate_shred::{self, DuplicateShredIndex, LeaderScheduleFn, MAX_DUPLICATE_SHREDS}, + ping_pong::PingCache, }; +use itertools::Itertools; use rayon::ThreadPool; use solana_ledger::shred::Shred; use solana_sdk::{ @@ -19,10 +23,12 @@ use solana_sdk::{ signature::{Keypair, Signer}, timing::timestamp, }; -use std::collections::{HashMap, HashSet}; - -///The min size for bloom filters -pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500; +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::Mutex, + time::Duration, +}; pub struct CrdsGossip { pub crds: Crds, @@ -53,63 +59,54 @@ impl CrdsGossip { } /// process a push message to the network + /// 
Returns origins' pubkeys of upserted values. pub fn process_push_message( &mut self, from: &Pubkey, values: Vec, now: u64, - ) -> Vec { + ) -> Vec { values .into_iter() - .filter_map(|val| { - let res = self - .push - .process_push_message(&mut self.crds, from, val, now); - if let Ok(Some(val)) = res { - self.pull - .record_old_hash(val.value_hash, val.local_timestamp); - Some(val) - } else { - None - } + .flat_map(|val| { + let origin = val.pubkey(); + self.push + .process_push_message(&mut self.crds, from, val, now) + .ok()?; + Some(origin) }) .collect() } /// remove redundant paths in the network - pub fn prune_received_cache( + pub fn prune_received_cache( &mut self, - labels: Vec, + origins: I, // Unique pubkeys of crds values' owners. stakes: &HashMap, - ) -> HashMap> { - let id = &self.id; - let push = &mut self.push; - let mut prune_map: HashMap> = HashMap::new(); - for origin in labels.iter().map(|k| k.pubkey()) { - let peers = push.prune_received_cache(id, &origin, stakes); - for from in peers { - prune_map.entry(from).or_default().insert(origin); - } - } - prune_map - } - - pub fn process_push_messages(&mut self, pending_push_messages: Vec<(CrdsValue, u64)>) { - for (push_message, timestamp) in pending_push_messages { - let _ = + ) -> HashMap> + where + I: IntoIterator, + { + let self_pubkey = self.id; + origins + .into_iter() + .flat_map(|origin| { self.push - .process_push_message(&mut self.crds, &self.id, push_message, timestamp); - } + .prune_received_cache(&self_pubkey, &origin, stakes) + .into_iter() + .zip(std::iter::repeat(origin)) + }) + .into_group_map() } pub fn new_push_messages( &mut self, - pending_push_messages: Vec<(CrdsValue, u64)>, + pending_push_messages: Vec, now: u64, - ) -> (Pubkey, HashMap>) { - self.process_push_messages(pending_push_messages); - let push_messages = self.push.new_push_messages(&self.crds, now); - (self.id, push_messages) + ) -> HashMap> { + let self_pubkey = self.id; + self.process_push_message(&self_pubkey, 
pending_push_messages, now); + self.push.new_push_messages(&self.crds, now) } pub(crate) fn push_duplicate_shred( @@ -217,20 +214,25 @@ impl CrdsGossip { pub fn new_pull_request( &self, thread_pool: &ThreadPool, + self_keypair: &Keypair, now: u64, gossip_validators: Option<&HashSet>, stakes: &HashMap, bloom_size: usize, - ) -> Result<(Pubkey, Vec, CrdsValue), CrdsGossipError> { + ping_cache: &Mutex, + pings: &mut Vec<(SocketAddr, Ping)>, + ) -> Result<(ContactInfo, Vec), CrdsGossipError> { self.pull.new_pull_request( thread_pool, &self.crds, - &self.id, + self_keypair, self.shred_version, now, gossip_validators, stakes, bloom_size, + ping_cache, + pings, ) } @@ -238,7 +240,7 @@ impl CrdsGossip { /// This is used for weighted random selection during `new_pull_request` /// It's important to use the local nodes request creation time as the weight /// instead of the response received time otherwise failed nodes will increase their weight. - pub fn mark_pull_request_creation_time(&mut self, from: &Pubkey, now: u64) { + pub fn mark_pull_request_creation_time(&mut self, from: Pubkey, now: u64) { self.pull.mark_pull_request_creation_time(from, now) } /// process a pull request and create a response @@ -266,7 +268,11 @@ impl CrdsGossip { response: Vec, now: u64, process_pull_stats: &mut ProcessPullStats, - ) -> (Vec, Vec, Vec) { + ) -> ( + Vec, // valid responses. + Vec, // responses with expired timestamps. + Vec, // hash of outdated values. 
+ ) { self.pull .filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats) } @@ -275,13 +281,13 @@ impl CrdsGossip { pub fn process_pull_responses( &mut self, from: &Pubkey, - responses: Vec, - responses_expired_timeout: Vec, + responses: Vec, + responses_expired_timeout: Vec, failed_inserts: Vec, now: u64, process_pull_stats: &mut ProcessPullStats, ) { - let success = self.pull.process_pull_responses( + self.pull.process_pull_responses( &mut self.crds, from, responses, @@ -290,19 +296,14 @@ impl CrdsGossip { now, process_pull_stats, ); - self.push.push_pull_responses(success, now); - } - - pub fn make_timeouts_test(&self) -> HashMap { - self.make_timeouts(&HashMap::new(), self.pull.crds_timeout) } pub fn make_timeouts( &self, stakes: &HashMap, - epoch_ms: u64, + epoch_duration: Duration, ) -> HashMap { - self.pull.make_timeouts(&self.id, stakes, epoch_ms) + self.pull.make_timeouts(self.id, stakes, epoch_duration) } pub fn purge( @@ -312,27 +313,20 @@ impl CrdsGossip { timeouts: &HashMap, ) -> usize { let mut rv = 0; - if now > self.push.msg_timeout { - let min = now - self.push.msg_timeout; - self.push.purge_old_pending_push_messages(&self.crds, min); - } if now > 5 * self.push.msg_timeout { let min = now - 5 * self.push.msg_timeout; self.push.purge_old_received_cache(min); } if now > self.pull.crds_timeout { //sanity check - let min = self.pull.crds_timeout; assert_eq!(timeouts[&self.id], std::u64::MAX); - assert_eq!(timeouts[&Pubkey::default()], min); + assert!(timeouts.contains_key(&Pubkey::default())); rv = self .pull .purge_active(thread_pool, &mut self.crds, now, &timeouts); } - if now > 5 * self.pull.crds_timeout { - let min = now - 5 * self.pull.crds_timeout; - self.pull.purge_purged(min); - } + self.crds + .trim_purged(now.saturating_sub(5 * self.pull.crds_timeout)); self.pull.purge_failed_inserts(now); rv } diff --git a/core/src/crds_gossip_pull.rs b/core/src/crds_gossip_pull.rs index c9ada0d5a8..98a6bcd486 100644 --- 
a/core/src/crds_gossip_pull.rs +++ b/core/src/crds_gossip_pull.rs @@ -10,12 +10,13 @@ //! of false positives. use crate::{ - cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY, + cluster_info::{Ping, CRDS_UNIQUE_PUBKEY_CAPACITY}, contact_info::ContactInfo, - crds::{Crds, VersionedCrdsValue}, - crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS}, + crds::Crds, + crds_gossip::{get_stake, get_weight}, crds_gossip_error::CrdsGossipError, - crds_value::{CrdsValue, CrdsValueLabel}, + crds_value::CrdsValue, + ping_pong::PingCache, }; use itertools::Itertools; use lru::LruCache; @@ -23,12 +24,18 @@ use rand::distributions::{Distribution, WeightedIndex}; use rand::Rng; use rayon::{prelude::*, ThreadPool}; use solana_runtime::bloom::{AtomicBloom, Bloom}; -use solana_sdk::hash::Hash; -use solana_sdk::pubkey::Pubkey; -use std::cmp; -use std::collections::VecDeque; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; +use solana_sdk::{ + hash::{hash, Hash}, + pubkey::Pubkey, + signature::{Keypair, Signer}, +}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::TryInto, + net::SocketAddr, + sync::Mutex, + time::{Duration, Instant}, +}; pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000; // The maximum age of a value received over pull responses @@ -175,8 +182,6 @@ pub struct ProcessPullStats { pub struct CrdsGossipPull { /// timestamp of last request pub(crate) pull_request_time: LruCache, - /// hash and insert time - pub purged_values: VecDeque<(Hash, u64)>, // Hash value and record time (ms) of the pull responses which failed to be // inserted in crds table; Preserved to stop the sender to send back the // same outdated payload again by adding them to the filter for the next @@ -190,7 +195,6 @@ pub struct CrdsGossipPull { impl Default for CrdsGossipPull { fn default() -> Self { Self { - purged_values: VecDeque::new(), pull_request_time: LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY), failed_inserts: VecDeque::new(), crds_timeout: 
CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, @@ -201,35 +205,62 @@ impl Default for CrdsGossipPull { } impl CrdsGossipPull { /// generate a random request + #[allow(clippy::too_many_arguments)] pub fn new_pull_request( &self, thread_pool: &ThreadPool, crds: &Crds, - self_id: &Pubkey, + self_keypair: &Keypair, self_shred_version: u16, now: u64, gossip_validators: Option<&HashSet>, stakes: &HashMap, bloom_size: usize, - ) -> Result<(Pubkey, Vec, CrdsValue), CrdsGossipError> { - let options = self.pull_options( - crds, - &self_id, - self_shred_version, - now, - gossip_validators, - stakes, - ); - if options.is_empty() { + ping_cache: &Mutex, + pings: &mut Vec<(SocketAddr, Ping)>, + ) -> Result<(ContactInfo, Vec), CrdsGossipError> { + let (weights, peers): (Vec<_>, Vec<_>) = self + .pull_options( + crds, + &self_keypair.pubkey(), + self_shred_version, + now, + gossip_validators, + stakes, + ) + .into_iter() + .unzip(); + if peers.is_empty() { return Err(CrdsGossipError::NoPeers); } - let filters = self.build_crds_filters(thread_pool, crds, bloom_size); - let index = WeightedIndex::new(options.iter().map(|weighted| weighted.0)).unwrap(); - let random = index.sample(&mut rand::thread_rng()); - let self_info = crds - .lookup(&CrdsValueLabel::ContactInfo(*self_id)) - .unwrap_or_else(|| panic!("self_id invalid {}", self_id)); - Ok((options[random].1.id, filters, self_info.clone())) + let mut peers = { + let mut rng = rand::thread_rng(); + let num_samples = peers.len() * 2; + let index = WeightedIndex::new(weights).unwrap(); + let sample_peer = move || peers[index.sample(&mut rng)]; + std::iter::repeat_with(sample_peer).take(num_samples) + }; + let peer = { + let mut rng = rand::thread_rng(); + let mut ping_cache = ping_cache.lock().unwrap(); + let mut pingf = move || Ping::new_rand(&mut rng, self_keypair).ok(); + let now = Instant::now(); + peers.find(|peer| { + let node = (peer.id, peer.gossip); + let (check, ping) = ping_cache.check(now, node, &mut pingf); + if let Some(ping) = ping 
{ + pings.push((peer.gossip, ping)); + } + check + }) + }; + match peer { + None => Err(CrdsGossipError::NoPeers), + Some(peer) => { + let filters = self.build_crds_filters(thread_pool, crds, bloom_size); + Ok((peer.clone(), filters)) + } + } } fn pull_options<'a>( @@ -283,13 +314,8 @@ impl CrdsGossipPull { /// This is used for weighted random selection during `new_pull_request` /// It's important to use the local nodes request creation time as the weight /// instead of the response received time otherwise failed nodes will increase their weight. - pub fn mark_pull_request_creation_time(&mut self, from: &Pubkey, now: u64) { - self.pull_request_time.put(*from, now); - } - - /// Store an old hash in the purged values set - pub fn record_old_hash(&mut self, hash: Hash, timestamp: u64) { - self.purged_values.push_back((hash, timestamp)) + pub fn mark_pull_request_creation_time(&mut self, from: Pubkey, now: u64) { + self.pull_request_time.put(from, now); } /// process a pull request @@ -298,11 +324,8 @@ impl CrdsGossipPull { I: IntoIterator, { for caller in callers { - let key = caller.label().pubkey(); - if let Ok(Some(val)) = crds.insert(caller, now) { - self.purged_values - .push_back((val.value_hash, val.local_timestamp)); - } + let key = caller.pubkey(); + let _ = crds.insert(caller, now); crds.update_record_timestamp(&key, now); } } @@ -331,56 +354,42 @@ impl CrdsGossipPull { responses: Vec, now: u64, stats: &mut ProcessPullStats, - ) -> (Vec, Vec, Vec) { - let mut versioned = vec![]; - let mut versioned_expired_timestamp = vec![]; - let mut failed_inserts = vec![]; - let mut maybe_push = |response, values: &mut Vec| { - let (push, value) = crds.would_insert(response, now); - if push { - values.push(value); + ) -> (Vec, Vec, Vec) { + let mut active_values = vec![]; + let mut expired_values = vec![]; + let default_timeout = timeouts + .get(&Pubkey::default()) + .copied() + .unwrap_or(self.msg_timeout); + let upsert = |response: CrdsValue| { + let owner = 
response.label().pubkey(); + // Check if the crds value is older than the msg_timeout + let timeout = timeouts.get(&owner).copied().unwrap_or(default_timeout); + // Before discarding this value, check if a ContactInfo for the + // owner exists in the table. If it doesn't, that implies that this + // value can be discarded + if !crds.upserts(&response) { + Some(response) + } else if now <= response.wallclock().saturating_add(timeout) { + active_values.push(response); + None + } else if crds.get_contact_info(owner).is_some() { + // Silently insert this old value without bumping record + // timestamps + expired_values.push(response); + None } else { - failed_inserts.push(value.value_hash) + stats.timeout_count += 1; + stats.failed_timeout += 1; + Some(response) } }; - for r in responses { - let owner = r.label().pubkey(); - // Check if the crds value is older than the msg_timeout - if now > r.wallclock().checked_add(self.msg_timeout).unwrap_or(0) - || now + self.msg_timeout < r.wallclock() - { - match &r.label() { - CrdsValueLabel::ContactInfo(_) => { - // Check if this ContactInfo is actually too old, it's possible that it has - // stake and so might have a longer effective timeout - let timeout = *timeouts - .get(&owner) - .unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap()); - if now > r.wallclock().checked_add(timeout).unwrap_or(0) - || now + timeout < r.wallclock() - { - stats.timeout_count += 1; - stats.failed_timeout += 1; - continue; - } - } - _ => { - // Before discarding this value, check if a ContactInfo for the owner - // exists in the table. 
If it doesn't, that implies that this value can be discarded - if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() { - stats.timeout_count += 1; - stats.failed_timeout += 1; - } else { - // Silently insert this old value without bumping record timestamps - maybe_push(r, &mut versioned_expired_timestamp); - } - continue; - } - } - } - maybe_push(r, &mut versioned); - } - (versioned, versioned_expired_timestamp, failed_inserts) + let failed_inserts = responses + .into_iter() + .filter_map(upsert) + .map(|resp| hash(&bincode::serialize(&resp).unwrap())) + .collect(); + (active_values, expired_values, failed_inserts) } /// process a vec of pull responses @@ -388,36 +397,22 @@ impl CrdsGossipPull { &mut self, crds: &mut Crds, from: &Pubkey, - responses: Vec, - responses_expired_timeout: Vec, - mut failed_inserts: Vec, + responses: Vec, + responses_expired_timeout: Vec, + failed_inserts: Vec, now: u64, stats: &mut ProcessPullStats, - ) -> Vec<(CrdsValueLabel, Hash, u64)> { - let mut success = vec![]; + ) { let mut owners = HashSet::new(); - for r in responses_expired_timeout { - let value_hash = r.value_hash; - if crds.insert_versioned(r).is_err() { - failed_inserts.push(value_hash); - } + for response in responses_expired_timeout { + let _ = crds.insert(response, now); } - for r in responses { - let label = r.value.label(); - let wc = r.value.wallclock(); - let hash = r.value_hash; - match crds.insert_versioned(r) { - Err(_) => failed_inserts.push(hash), - Ok(old) => { - stats.success += 1; - self.num_pulls += 1; - owners.insert(label.pubkey()); - success.push((label, hash, wc)); - if let Some(val) = old { - self.purged_values - .push_back((val.value_hash, val.local_timestamp)) - } - } + for response in responses { + let owner = response.pubkey(); + if let Ok(()) = crds.insert(response, now) { + stats.success += 1; + self.num_pulls += 1; + owners.insert(owner); } } owners.insert(*from); @@ -428,7 +423,6 @@ impl CrdsGossipPull { self.purge_failed_inserts(now); 
self.failed_inserts .extend(failed_inserts.into_iter().zip(std::iter::repeat(now))); - success } pub fn purge_failed_inserts(&mut self, now: u64) { @@ -452,21 +446,18 @@ impl CrdsGossipPull { bloom_size: usize, ) -> Vec { const PAR_MIN_LENGTH: usize = 512; - let num = cmp::max( - CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS, - crds.len() + self.purged_values.len() + self.failed_inserts.len(), - ); - let filters = CrdsFilterSet::new(num, bloom_size); + #[cfg(debug_assertions)] + const MIN_NUM_BLOOM_ITEMS: usize = 512; + #[cfg(not(debug_assertions))] + const MIN_NUM_BLOOM_ITEMS: usize = 65_536; + let num_items = crds.len() + crds.num_purged() + self.failed_inserts.len(); + let num_items = MIN_NUM_BLOOM_ITEMS.max(num_items); + let filters = CrdsFilterSet::new(num_items, bloom_size); thread_pool.install(|| { crds.par_values() .with_min_len(PAR_MIN_LENGTH) .map(|v| v.value_hash) - .chain( - self.purged_values - .par_iter() - .with_min_len(PAR_MIN_LENGTH) - .map(|(v, _)| *v), - ) + .chain(crds.purged().with_min_len(PAR_MIN_LENGTH)) .chain( self.failed_inserts .par_iter() @@ -489,8 +480,8 @@ impl CrdsGossipPull { let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS; let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4); //skip filters from callers that are too old - let future = now.saturating_add(msg_timeout); - let past = now.saturating_sub(msg_timeout); + let caller_wallclock_window = + now.saturating_sub(msg_timeout)..now.saturating_add(msg_timeout); let mut dropped_requests = 0; let mut total_skipped = 0; let ret: Vec<_> = filters @@ -500,7 +491,7 @@ impl CrdsGossipPull { return None; } let caller_wallclock = caller.wallclock(); - if caller_wallclock >= future || caller_wallclock < past { + if !caller_wallclock_window.contains(&caller_wallclock) { dropped_requests += 1; return Some(vec![]); } @@ -533,30 +524,31 @@ impl CrdsGossipPull { inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped); ret } - pub fn make_timeouts_def( - &self, - self_id: 
&Pubkey, - stakes: &HashMap, - epoch_ms: u64, - min_ts: u64, - ) -> HashMap { - let mut timeouts: HashMap = stakes.keys().map(|s| (*s, epoch_ms)).collect(); - timeouts.insert(*self_id, std::u64::MAX); - timeouts.insert(Pubkey::default(), min_ts); - timeouts - } - pub fn make_timeouts( + pub(crate) fn make_timeouts( &self, - self_id: &Pubkey, + self_pubkey: Pubkey, stakes: &HashMap, - epoch_ms: u64, + epoch_duration: Duration, ) -> HashMap { - self.make_timeouts_def(self_id, stakes, epoch_ms, self.crds_timeout) + let extended_timeout = self.crds_timeout.max(epoch_duration.as_millis() as u64); + let default_timeout = if stakes.values().all(|stake| *stake == 0) { + extended_timeout + } else { + self.crds_timeout + }; + stakes + .iter() + .filter(|(_, stake)| **stake > 0) + .map(|(pubkey, _)| (*pubkey, extended_timeout)) + .chain(vec![ + (Pubkey::default(), default_timeout), + (self_pubkey, u64::MAX), + ]) + .collect() } /// Purge values from the crds that are older then `active_timeout` - /// The value_hash of an active item is put into self.purged_values queue pub fn purge_active( &mut self, thread_pool: &ThreadPool, @@ -564,25 +556,11 @@ impl CrdsGossipPull { now: u64, timeouts: &HashMap, ) -> usize { - let num_purged_values = self.purged_values.len(); - self.purged_values.extend( - crds.find_old_labels(thread_pool, now, timeouts) - .into_iter() - .filter_map(|label| { - let val = crds.remove(&label)?; - Some((val.value_hash, val.local_timestamp)) - }), - ); - self.purged_values.len() - num_purged_values - } - /// Purge values from the `self.purged_values` queue that are older then purge_timeout - pub fn purge_purged(&mut self, min_ts: u64) { - let cnt = self - .purged_values - .iter() - .take_while(|v| v.1 < min_ts) - .count(); - self.purged_values.drain(..cnt); + let labels = crds.find_old_labels(thread_pool, now, timeouts); + for label in &labels { + crds.remove(label, now); + } + labels.len() } /// For legacy tests @@ -622,20 +600,19 @@ impl CrdsGossipPull { } 
Self { pull_request_time, - purged_values: self.purged_values.clone(), failed_inserts: self.failed_inserts.clone(), ..*self } } } #[cfg(test)] -mod test { +pub(crate) mod tests { use super::*; use crate::cluster_info::MAX_BLOOM_SIZE; use crate::contact_info::ContactInfo; use crate::crds_value::{CrdsData, Vote}; use itertools::Itertools; - use rand::thread_rng; + use rand::{seq::SliceRandom, thread_rng}; use rayon::ThreadPoolBuilder; use solana_perf::test_tx::test_tx; use solana_sdk::{ @@ -643,7 +620,12 @@ mod test { packet::PACKET_DATA_SIZE, timing::timestamp, }; - use std::iter::repeat_with; + use std::{iter::repeat_with, time::Duration}; + + #[cfg(debug_assertions)] + pub(crate) const MIN_NUM_BLOOM_FILTERS: usize = 1; + #[cfg(not(debug_assertions))] + pub(crate) const MIN_NUM_BLOOM_FILTERS: usize = 64; #[test] fn test_hash_as_u64() { @@ -881,37 +863,23 @@ mod test { fn test_build_crds_filter() { let mut rng = thread_rng(); let thread_pool = ThreadPoolBuilder::new().build().unwrap(); - let mut crds_gossip_pull = CrdsGossipPull::default(); + let crds_gossip_pull = CrdsGossipPull::default(); let mut crds = Crds::default(); - for _ in 0..10_000 { - crds_gossip_pull - .purged_values - .push_back((solana_sdk::hash::new_rand(&mut rng), rng.gen())); - } + let keypairs: Vec<_> = repeat_with(Keypair::new).take(10_000).collect(); let mut num_inserts = 0; - for _ in 0..20_000 { - if crds - .insert(CrdsValue::new_rand(&mut rng, None), rng.gen()) - .is_ok() - { + for _ in 0..40_000 { + let keypair = keypairs.choose(&mut rng).unwrap(); + let value = CrdsValue::new_rand(&mut rng, Some(keypair)); + if crds.insert(value, rng.gen()).is_ok() { num_inserts += 1; } } - assert_eq!(num_inserts, 20_000); + assert!(num_inserts > 30_000, "num inserts: {}", num_inserts); let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE); - assert_eq!(filters.len(), 32); - let hash_values: Vec<_> = crds - .values() - .map(|v| v.value_hash) - .chain( - crds_gossip_pull - 
.purged_values - .iter() - .map(|(value_hash, _)| value_hash) - .cloned(), - ) - .collect(); - assert_eq!(hash_values.len(), 10_000 + 20_000); + assert_eq!(filters.len(), MIN_NUM_BLOOM_FILTERS.max(32)); + let purged: Vec<_> = thread_pool.install(|| crds.purged().collect()); + let hash_values: Vec<_> = crds.values().map(|v| v.value_hash).chain(purged).collect(); + assert_eq!(hash_values.len(), 40_000); let mut false_positives = 0; for hash_value in hash_values { let mut num_hits = 0; @@ -926,112 +894,134 @@ mod test { } assert_eq!(num_hits, 1); } - assert!(false_positives < 50_000, "fp: {}", false_positives); + assert!(false_positives < 150_000, "fp: {}", false_positives); } #[test] fn test_new_pull_request() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let mut crds = Crds::default(); + let node_keypair = Keypair::new(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), + &node_keypair.pubkey(), 0, ))); - let id = entry.label().pubkey(); let node = CrdsGossipPull::default(); + let mut pings = Vec::new(); + let ping_cache = Mutex::new(PingCache::new( + Duration::from_secs(20 * 60), // ttl + 128, // capacity + )); assert_eq!( node.new_pull_request( &thread_pool, &crds, - &id, + &node_keypair, 0, 0, None, &HashMap::new(), - PACKET_DATA_SIZE + PACKET_DATA_SIZE, + &ping_cache, + &mut pings, ), Err(CrdsGossipError::NoPeers) ); - crds.insert(entry.clone(), 0).unwrap(); + crds.insert(entry, 0).unwrap(); assert_eq!( node.new_pull_request( &thread_pool, &crds, - &id, + &node_keypair, 0, 0, None, &HashMap::new(), - PACKET_DATA_SIZE + PACKET_DATA_SIZE, + &ping_cache, + &mut pings, ), Err(CrdsGossipError::NoPeers) ); - - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + ping_cache + .lock() + .unwrap() + .mock_pong(new.id, new.gossip, 
Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); crds.insert(new.clone(), 0).unwrap(); let req = node.new_pull_request( &thread_pool, &crds, - &id, + &node_keypair, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE, + &ping_cache, + &mut pings, ); - let (to, _, self_info) = req.unwrap(); - assert_eq!(to, new.label().pubkey()); - assert_eq!(self_info, entry); + let (peer, _) = req.unwrap(); + assert_eq!(peer, *new.contact_info().unwrap()); } #[test] fn test_new_mark_creation_time() { let now: u64 = 1_605_127_770_789; let thread_pool = ThreadPoolBuilder::new().build().unwrap(); + let mut ping_cache = PingCache::new( + Duration::from_secs(20 * 60), // ttl + 128, // capacity + ); let mut crds = Crds::default(); + let node_keypair = Keypair::new(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), + &node_keypair.pubkey(), 0, ))); - let node_pubkey = entry.label().pubkey(); let mut node = CrdsGossipPull::default(); - crds.insert(entry.clone(), now).unwrap(); - let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + crds.insert(entry, now).unwrap(); + let old = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + ping_cache.mock_pong(old.id, old.gossip, Instant::now()); + let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(old)); crds.insert(old.clone(), now).unwrap(); - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + ping_cache.mock_pong(new.id, new.gossip, Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); crds.insert(new.clone(), now).unwrap(); // set request creation time to now. 
let now = now + 50_000; - node.mark_pull_request_creation_time(&new.label().pubkey(), now); + node.mark_pull_request_creation_time(new.label().pubkey(), now); // odds of getting the other request should be close to 1. let now = now + 1_000; - for _ in 0..10 { - let req = node.new_pull_request( - &thread_pool, - &crds, - &node_pubkey, - 0, - now, - None, - &HashMap::new(), - PACKET_DATA_SIZE, - ); - let (to, _, self_info) = req.unwrap(); - assert_eq!(to, old.label().pubkey()); - assert_eq!(self_info, entry); - } + let mut pings = Vec::new(); + let ping_cache = Mutex::new(ping_cache); + let old = old.contact_info().unwrap(); + let count = repeat_with(|| { + let (peer, _filters) = node + .new_pull_request( + &thread_pool, + &crds, + &node_keypair, + 0, // self_shred_version + now, + None, // gossip_validators + &HashMap::new(), // stakes + PACKET_DATA_SIZE, // bloom_size + &ping_cache, + &mut pings, + ) + .unwrap(); + peer + }) + .take(100) + .filter(|peer| peer != old) + .count(); + assert!(count < 2, "count of peer != old: {}", count); } #[test] @@ -1045,7 +1035,7 @@ mod test { for k in 0..NUM_REPS { let pubkey = pubkeys[rng.gen_range(0, pubkeys.len())]; let now = now + k as u64; - node.mark_pull_request_creation_time(&pubkey, now); + node.mark_pull_request_creation_time(pubkey, now); *requests.entry(pubkey).or_default() = now; } assert!(node.pull_request_time.len() <= CRDS_UNIQUE_PUBKEY_CAPACITY); @@ -1072,33 +1062,40 @@ mod test { #[test] fn test_generate_pull_responses() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); + let node_keypair = Keypair::new(); let mut node_crds = Crds::default(); + let mut ping_cache = PingCache::new( + Duration::from_secs(20 * 60), // ttl + 128, // capacity + ); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), + &node_keypair.pubkey(), 0, ))); - let node_pubkey = entry.label().pubkey(); + let caller = entry.clone(); let node = 
CrdsGossipPull::default(); node_crds.insert(entry, 0).unwrap(); - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + ping_cache.mock_pong(new.id, new.gossip, Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); node_crds.insert(new, 0).unwrap(); + let mut pings = Vec::new(); let req = node.new_pull_request( &thread_pool, &node_crds, - &node_pubkey, + &node_keypair, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE, + &Mutex::new(ping_cache), + &mut pings, ); let mut dest_crds = Crds::default(); let dest = CrdsGossipPull::default(); - let (_, filters, caller) = req.unwrap(); + let (_, filters) = req.unwrap(); let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); let rsp = dest.generate_pull_responses( &dest_crds, @@ -1125,56 +1122,68 @@ mod test { CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, ); assert_eq!(rsp[0].len(), 0); - - assert_eq!(filters.len(), 1); - filters.push(filters[0].clone()); - //should return new value since caller is new - filters[1].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1, - ))); - + assert_eq!(filters.len(), MIN_NUM_BLOOM_FILTERS); + filters.extend({ + // Should return new value since caller is new. + let now = CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1; + let caller = ContactInfo::new_localhost(&Pubkey::new_unique(), now); + let caller = CrdsValue::new_unsigned(CrdsData::ContactInfo(caller)); + filters + .iter() + .map(|(_, filter)| (caller.clone(), filter.clone())) + .collect::>() + }); let rsp = dest.generate_pull_responses( &dest_crds, &filters, /*output_size_limit=*/ usize::MAX, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS, ); - assert_eq!(rsp.len(), 2); - assert_eq!(rsp[0].len(), 0); - assert_eq!(rsp[1].len(), 1); // Orders are also preserved. 
+ assert_eq!(rsp.len(), 2 * MIN_NUM_BLOOM_FILTERS); + // There should be only one non-empty response in the 2nd half. + // Orders are also preserved. + assert!(rsp.iter().take(MIN_NUM_BLOOM_FILTERS).all(|r| r.is_empty())); + assert_eq!(rsp.iter().filter(|r| r.is_empty()).count(), rsp.len() - 1); + assert_eq!(rsp.iter().find(|r| r.len() == 1).unwrap().len(), 1); } #[test] fn test_process_pull_request() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); + let node_keypair = Keypair::new(); let mut node_crds = Crds::default(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), + &node_keypair.pubkey(), 0, ))); - let node_pubkey = entry.label().pubkey(); + let caller = entry.clone(); let node = CrdsGossipPull::default(); node_crds.insert(entry, 0).unwrap(); - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let mut ping_cache = PingCache::new( + Duration::from_secs(20 * 60), // ttl + 128, // capacity + ); + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); + ping_cache.mock_pong(new.id, new.gossip, Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); node_crds.insert(new, 0).unwrap(); + let mut pings = Vec::new(); let req = node.new_pull_request( &thread_pool, &node_crds, - &node_pubkey, + &node_keypair, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE, + &Mutex::new(ping_cache), + &mut pings, ); let mut dest_crds = Crds::default(); let mut dest = CrdsGossipPull::default(); - let (_, filters, caller) = req.unwrap(); + let (_, filters) = req.unwrap(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); let rsp = dest.generate_pull_responses( &dest_crds, @@ -1188,78 +1197,67 @@ mod test { 1, ); assert!(rsp.iter().all(|rsp| rsp.is_empty())); - assert!(dest_crds.lookup(&caller.label()).is_some()); - assert_eq!( - dest_crds - 
.lookup_versioned(&caller.label()) - .unwrap() - .insert_timestamp, - 1 - ); - assert_eq!( - dest_crds - .lookup_versioned(&caller.label()) - .unwrap() - .local_timestamp, - 1 - ); + assert!(dest_crds.get(&caller.label()).is_some()); + assert_eq!(dest_crds.get(&caller.label()).unwrap().local_timestamp, 1); } #[test] fn test_process_pull_request_response() { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); + let node_keypair = Keypair::new(); let mut node_crds = Crds::default(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), + &node_keypair.pubkey(), 1, ))); + let caller = entry.clone(); let node_pubkey = entry.label().pubkey(); let mut node = CrdsGossipPull::default(); node_crds.insert(entry, 0).unwrap(); - - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 1, - ))); + let mut ping_cache = PingCache::new( + Duration::from_secs(20 * 60), // ttl + 128, // capacity + ); + let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1); + ping_cache.mock_pong(new.id, new.gossip, Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); node_crds.insert(new, 0).unwrap(); let mut dest = CrdsGossipPull::default(); let mut dest_crds = Crds::default(); let new_id = solana_sdk::pubkey::new_rand(); - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &new_id, 1, - ))); + let new = ContactInfo::new_localhost(&new_id, 1); + ping_cache.mock_pong(new.id, new.gossip, Instant::now()); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); dest_crds.insert(new.clone(), 0).unwrap(); // node contains a key from the dest node, but at an older local timestamp - let same_key = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &new_id, 0, - ))); + let same_key = ContactInfo::new_localhost(&new_id, 0); + 
ping_cache.mock_pong(same_key.id, same_key.gossip, Instant::now()); + let same_key = CrdsValue::new_unsigned(CrdsData::ContactInfo(same_key)); assert_eq!(same_key.label(), new.label()); assert!(same_key.wallclock() < new.wallclock()); node_crds.insert(same_key.clone(), 0).unwrap(); - assert_eq!( - node_crds - .lookup_versioned(&same_key.label()) - .unwrap() - .local_timestamp, - 0 - ); + assert_eq!(node_crds.get(&same_key.label()).unwrap().local_timestamp, 0); let mut done = false; + let mut pings = Vec::new(); + let ping_cache = Mutex::new(ping_cache); for _ in 0..30 { // there is a chance of a false positive with bloom filters let req = node.new_pull_request( &thread_pool, &node_crds, - &node_pubkey, + &node_keypair, 0, 0, None, &HashMap::new(), PACKET_DATA_SIZE, + &ping_cache, + &mut pings, ); - let (_, filters, caller) = req.unwrap(); + let (_, filters) = req.unwrap(); let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect(); - let mut rsp = dest.generate_pull_responses( + let rsp = dest.generate_pull_responses( &dest_crds, &filters, /*output_size_limit=*/ usize::MAX, @@ -1279,32 +1277,20 @@ mod test { if rsp.is_empty() { continue; } - assert_eq!(rsp.len(), 1); + assert_eq!(rsp.len(), MIN_NUM_BLOOM_FILTERS); let failed = node .process_pull_response( &mut node_crds, &node_pubkey, - &node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1), - rsp.pop().unwrap(), + &node.make_timeouts(node_pubkey, &HashMap::new(), Duration::default()), + rsp.into_iter().flatten().collect(), 1, ) .0; assert_eq!(failed, 0); - assert_eq!( - node_crds - .lookup_versioned(&new.label()) - .unwrap() - .local_timestamp, - 1 - ); + assert_eq!(node_crds.get(&new.label()).unwrap().local_timestamp, 1); // verify that the whole record was updated for dest since this is a response from dest - assert_eq!( - node_crds - .lookup_versioned(&same_key.label()) - .unwrap() - .local_timestamp, - 1 - ); + assert_eq!(node_crds.get(&same_key.label()).unwrap().local_timestamp, 
1); done = true; break; } @@ -1327,20 +1313,24 @@ mod test { 0, ))); node_crds.insert(old.clone(), 0).unwrap(); - let value_hash = node_crds.lookup_versioned(&old.label()).unwrap().value_hash; + let value_hash = node_crds.get(&old.label()).unwrap().value_hash; //verify self is valid - assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label); - + assert_eq!( + node_crds.get(&node_label).unwrap().value.label(), + node_label + ); // purge - let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1); - node.purge_active(&thread_pool, &mut node_crds, 2, &timeouts); + let timeouts = node.make_timeouts(node_pubkey, &HashMap::new(), Duration::default()); + node.purge_active(&thread_pool, &mut node_crds, node.crds_timeout, &timeouts); //verify self is still valid after purge - assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label); - - assert_eq!(node_crds.lookup_versioned(&old.label()), None); - assert_eq!(node.purged_values.len(), 1); + assert_eq!( + node_crds.get(&node_label).unwrap().value.label(), + node_label + ); + assert_eq!(node_crds.get(&old.label()), None); + assert_eq!(node_crds.num_purged(), 1); for _ in 0..30 { // there is a chance of a false positive with bloom filters // assert that purged value is still in the set @@ -1350,8 +1340,8 @@ mod test { } // purge the value - node.purge_purged(1); - assert_eq!(node.purged_values.len(), 0); + node_crds.trim_purged(node.crds_timeout + 1); + assert_eq!(node_crds.num_purged(), 0); } #[test] #[allow(clippy::float_cmp)] @@ -1469,7 +1459,7 @@ mod test { node.msg_timeout + 100, ) .0, - 2 + 4 ); let mut node_crds = Crds::default(); @@ -1511,10 +1501,10 @@ mod test { &peer_pubkey, &timeouts, vec![peer_vote], - node.msg_timeout + 1, + node.msg_timeout + 2, ) .0, - 1 + 2 ); } } diff --git a/core/src/crds_gossip_push.rs b/core/src/crds_gossip_push.rs index a6cc6f7daf..595c97680b 100644 --- a/core/src/crds_gossip_push.rs +++ b/core/src/crds_gossip_push.rs @@ -11,22 +11,22 @@ use 
crate::{ cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY, contact_info::ContactInfo, - crds::{Crds, VersionedCrdsValue}, - crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS}, + crds::{Crds, Cursor}, + crds_gossip::{get_stake, get_weight}, crds_gossip_error::CrdsGossipError, - crds_value::{CrdsValue, CrdsValueLabel}, + crds_value::CrdsValue, weighted_shuffle::weighted_shuffle, }; use bincode::serialized_size; use indexmap::map::IndexMap; -use itertools::Itertools; use lru::LruCache; use rand::{seq::SliceRandom, Rng}; use solana_runtime::bloom::{AtomicBloom, Bloom}; -use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp}; +use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp}; use std::{ cmp, collections::{HashMap, HashSet}, + ops::RangeBounds, }; pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30; @@ -46,13 +46,16 @@ pub struct CrdsGossipPush { pub max_bytes: usize, /// active set of validators for push active_set: IndexMap>, - /// push message queue - push_messages: HashMap, + /// Cursor into the crds table for values to push. + crds_cursor: Cursor, /// Cache that tracks which validators a message was received from /// bool indicates it has been pruned. 
/// This cache represents a lagging view of which validators /// currently have this node in their `active_set` - received_cache: HashMap>, + received_cache: HashMap< + Pubkey, // origin/owner + HashMap, + >, last_pushed_to: LruCache, pub num_active: usize, pub push_fanout: usize, @@ -69,7 +72,7 @@ impl Default for CrdsGossipPush { // Allow upto 64 Crds Values per PUSH max_bytes: PACKET_DATA_SIZE * 64, active_set: IndexMap::new(), - push_messages: HashMap::new(), + crds_cursor: Cursor::default(), received_cache: HashMap::new(), last_pushed_to: LruCache::new(CRDS_UNIQUE_PUBKEY_CAPACITY), num_active: CRDS_GOSSIP_NUM_ACTIVE, @@ -83,8 +86,9 @@ impl Default for CrdsGossipPush { } } impl CrdsGossipPush { - pub fn num_pending(&self) -> usize { - self.push_messages.len() + pub fn num_pending(&self, crds: &Crds) -> usize { + let mut cursor = self.crds_cursor; + crds.get_entries(&mut cursor).count() } fn prune_stake_threshold(self_stake: u64, origin_stake: u64) -> u64 { @@ -100,67 +104,62 @@ impl CrdsGossipPush { ) -> Vec { let origin_stake = stakes.get(origin).unwrap_or(&0); let self_stake = stakes.get(self_pubkey).unwrap_or(&0); - let cache = self.received_cache.get(origin); - if cache.is_none() { - return Vec::new(); - } - let peers = cache.unwrap(); - + let peers = match self.received_cache.get_mut(origin) { + None => return Vec::default(), + Some(peers) => peers, + }; let peer_stake_total: u64 = peers .iter() - .filter(|v| !(v.1).0) - .map(|v| stakes.get(v.0).unwrap_or(&0)) + .filter(|(_, (pruned, _))| !pruned) + .filter_map(|(peer, _)| stakes.get(peer)) .sum(); let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake); if peer_stake_total < prune_stake_threshold { return Vec::new(); } - - let staked_peers: Vec<(Pubkey, u64)> = peers - .iter() - .filter(|v| !(v.1).0) - .filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s))) - .filter(|(_, s)| *s > 0) - .collect(); - - let mut seed = [0; 32]; - rand::thread_rng().fill(&mut seed[..]); - let shuffle 
= weighted_shuffle( - &staked_peers.iter().map(|(_, stake)| *stake).collect_vec(), - seed, - ); - + let shuffled_staked_peers = { + let peers: Vec<_> = peers + .iter() + .filter(|(_, (pruned, _))| !pruned) + .filter_map(|(peer, _)| Some((*peer, *stakes.get(peer)?))) + .filter(|(_, stake)| *stake > 0) + .collect(); + let mut seed = [0; 32]; + rand::thread_rng().fill(&mut seed[..]); + let weights: Vec<_> = peers.iter().map(|(_, stake)| *stake).collect(); + weighted_shuffle(&weights, seed) + .into_iter() + .map(move |i| peers[i]) + }; let mut keep = HashSet::new(); let mut peer_stake_sum = 0; keep.insert(*origin); - for next in shuffle { - let (next_peer, next_stake) = staked_peers[next]; - if next_peer == *origin { + for (peer, stake) in shuffled_staked_peers { + if peer == *origin { continue; } - keep.insert(next_peer); - peer_stake_sum += next_stake; + keep.insert(peer); + peer_stake_sum += stake; if peer_stake_sum >= prune_stake_threshold && keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES { break; } } - - let pruned_peers: Vec = peers + for (peer, (pruned, _)) in peers.iter_mut() { + if !*pruned && !keep.contains(peer) { + *pruned = true; + } + } + peers .keys() - .filter(|p| !keep.contains(p)) - .cloned() - .collect(); - pruned_peers.iter().for_each(|p| { - self.received_cache - .get_mut(origin) - .unwrap() - .get_mut(p) - .unwrap() - .0 = true; - }); - pruned_peers + .filter(|peer| !keep.contains(peer)) + .copied() + .collect() + } + + fn wallclock_window(&self, now: u64) -> impl RangeBounds { + now.saturating_sub(self.msg_timeout)..=now.saturating_add(self.msg_timeout) } /// process a push message to the network @@ -170,41 +169,22 @@ impl CrdsGossipPush { from: &Pubkey, value: CrdsValue, now: u64, - ) -> Result, CrdsGossipError> { + ) -> Result<(), CrdsGossipError> { self.num_total += 1; - if now > value.wallclock().checked_add(self.msg_timeout).unwrap_or(0) { + if !self.wallclock_window(now).contains(&value.wallclock()) { return 
Err(CrdsGossipError::PushMessageTimeout); } - if now + self.msg_timeout < value.wallclock() { - return Err(CrdsGossipError::PushMessageTimeout); - } - let label = value.label(); - let origin = label.pubkey(); - let new_value = crds.new_versioned(now, value); - let value_hash = new_value.value_hash; - let received_set = self - .received_cache + let origin = value.pubkey(); + self.received_cache .entry(origin) - .or_insert_with(HashMap::new); - received_set.entry(*from).or_insert((false, 0)).1 = now; - - let old = crds.insert_versioned(new_value); - if old.is_err() { + .or_default() + .entry(*from) + .and_modify(|(_pruned, timestamp)| *timestamp = now) + .or_insert((/*pruned:*/ false, now)); + crds.insert(value, now).map_err(|_| { self.num_old += 1; - return Err(CrdsGossipError::PushMessageOldVersion); - } - self.push_messages.insert(label, value_hash); - Ok(old.unwrap()) - } - - /// push pull responses - pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) { - for (label, value_hash, wc) in values { - if now > wc.checked_add(self.msg_timeout).unwrap_or(0) { - continue; - } - self.push_messages.insert(label, value_hash); - } + CrdsGossipError::PushMessageOldVersion + }) } /// New push message to broadcast to peers. @@ -213,7 +193,6 @@ impl CrdsGossipPush { /// The list of push messages is created such that all the randomly selected peers have not /// pruned the source addresses. 
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap> { - trace!("new_push_messages {}", self.push_messages.len()); let push_fanout = self.push_fanout.min(self.active_set.len()); if push_fanout == 0 { return HashMap::default(); @@ -221,22 +200,24 @@ impl CrdsGossipPush { let mut num_pushes = 0; let mut num_values = 0; let mut total_bytes: usize = 0; - let mut labels = vec![]; let mut push_messages: HashMap> = HashMap::new(); - let cutoff = now.saturating_sub(self.msg_timeout); - let lookup = |label, &hash| -> Option<&CrdsValue> { - let value = crds.lookup_versioned(label)?; - if value.value_hash != hash || value.value.wallclock() < cutoff { - None - } else { - Some(&value.value) + let wallclock_window = self.wallclock_window(now); + let entries = crds + .get_entries(&mut self.crds_cursor) + .map(|entry| &entry.value) + .filter(|value| wallclock_window.contains(&value.wallclock())); + for value in entries { + let serialized_size = serialized_size(&value).unwrap(); + total_bytes = total_bytes.saturating_add(serialized_size as usize); + if total_bytes > self.max_bytes { + break; } - }; - let mut push_value = |origin: Pubkey, value: &CrdsValue| { - //use a consistent index for the same origin so - //the active set learns the MST for that origin - let start = origin.as_ref()[0] as usize; - for i in start..(start + push_fanout) { + num_values += 1; + let origin = value.pubkey(); + // Use a consistent index for the same origin so the active set + // learns the MST for that origin. 
+ let offset = origin.as_ref()[0] as usize; + for i in offset..offset + push_fanout { let index = i % self.active_set.len(); let (peer, filter) = self.active_set.get_index(index).unwrap(); if !filter.contains(&origin) || value.should_force_push(peer) { @@ -245,27 +226,9 @@ impl CrdsGossipPush { num_pushes += 1; } } - }; - for (label, hash) in &self.push_messages { - match lookup(label, hash) { - None => labels.push(label.clone()), - Some(value) if value.wallclock() > now => continue, - Some(value) => { - total_bytes += serialized_size(value).unwrap() as usize; - if total_bytes > self.max_bytes { - break; - } - num_values += 1; - labels.push(label.clone()); - push_value(label.pubkey(), value); - } - } } self.num_pushes += num_pushes; trace!("new_push_messages {} {}", num_values, self.active_set.len()); - for label in labels { - self.push_messages.remove(&label); - } for target_pubkey in push_messages.keys().copied() { self.last_pushed_to.put(target_pubkey, now); } @@ -300,46 +263,48 @@ impl CrdsGossipPush { network_size: usize, ratio: usize, ) { + const BLOOM_FALSE_RATE: f64 = 0.1; + const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; + #[cfg(debug_assertions)] + const MIN_NUM_BLOOM_ITEMS: usize = 512; + #[cfg(not(debug_assertions))] + const MIN_NUM_BLOOM_ITEMS: usize = CRDS_UNIQUE_PUBKEY_CAPACITY; let mut rng = rand::thread_rng(); let need = Self::compute_need(self.num_active, self.active_set.len(), ratio); let mut new_items = HashMap::new(); - - let options: Vec<_> = self.push_options( - crds, - &self_id, - self_shred_version, - stakes, - gossip_validators, - ); - if options.is_empty() { + let (weights, peers): (Vec<_>, Vec<_>) = self + .push_options( + crds, + &self_id, + self_shred_version, + stakes, + gossip_validators, + ) + .into_iter() + .unzip(); + if peers.is_empty() { return; } - - let mut seed = [0; 32]; - rng.fill(&mut seed[..]); - let mut shuffle = weighted_shuffle( - &options.iter().map(|weighted| weighted.0).collect_vec(), - seed, - ) - .into_iter(); - - 
while new_items.len() < need { - match shuffle.next() { - Some(index) => { - let item = options[index].1; - if self.active_set.get(&item.id).is_some() { - continue; - } - if new_items.get(&item.id).is_some() { - continue; - } - let size = cmp::max(CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS, network_size); - let bloom: AtomicBloom<_> = Bloom::random(size, 0.1, 1024 * 8 * 4).into(); - bloom.add(&item.id); - new_items.insert(item.id, bloom); - } - _ => break, + let num_bloom_items = MIN_NUM_BLOOM_ITEMS.max(network_size); + let shuffle = { + let mut seed = [0; 32]; + rng.fill(&mut seed[..]); + weighted_shuffle(&weights, seed).into_iter() + }; + for peer in shuffle.map(|i| peers[i].id) { + if new_items.len() >= need { + break; + } + if self.active_set.contains_key(&peer) || new_items.contains_key(&peer) { + continue; } + let bloom = AtomicBloom::from(Bloom::random( + num_bloom_items, + BLOOM_FALSE_RATE, + BLOOM_MAX_BITS, + )); + bloom.add(&peer); + new_items.insert(peer, bloom); } let mut keys: Vec = self.active_set.keys().cloned().collect(); keys.shuffle(&mut rng); @@ -400,15 +365,6 @@ impl CrdsGossipPush { .collect() } - /// purge old pending push messages - pub fn purge_old_pending_push_messages(&mut self, crds: &Crds, min_time: u64) { - self.push_messages.retain(|k, hash| { - matches!(crds.lookup_versioned(k), Some(versioned) if - versioned.value.wallclock() >= min_time - && versioned.value_hash == *hash) - }); - } - /// purge received push message cache pub fn purge_old_received_cache(&mut self, min_time: u64) { self.received_cache.retain(|_, v| { @@ -430,7 +386,6 @@ impl CrdsGossipPush { } Self { active_set, - push_messages: self.push_messages.clone(), received_cache: self.received_cache.clone(), last_pushed_to, ..*self @@ -502,9 +457,9 @@ mod test { // push a new message assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0), - Ok(None) + Ok(()) ); - assert_eq!(crds.lookup(&label), Some(&value)); + assert_eq!(crds.get(&label).unwrap().value, 
value); // push it again assert_matches!( @@ -523,7 +478,7 @@ mod test { // push a new message assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), value, 0), - Ok(None) + Ok(()) ); // push an old version @@ -567,19 +522,16 @@ mod test { // push a new message assert_eq!( - push.process_push_message(&mut crds, &Pubkey::default(), value_old.clone(), 0), - Ok(None) + push.process_push_message(&mut crds, &Pubkey::default(), value_old, 0), + Ok(()) ); // push an old version ci.wallclock = 1; let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci)); assert_eq!( - push.process_push_message(&mut crds, &Pubkey::default(), value, 0) - .unwrap() - .unwrap() - .value, - value_old + push.process_push_message(&mut crds, &Pubkey::default(), value, 0), + Ok(()) ); } #[test] @@ -600,7 +552,7 @@ mod test { 0, ))); - assert_eq!(crds.insert(value1.clone(), now), Ok(None)); + assert_eq!(crds.insert(value1.clone(), now), Ok(())); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); assert!(push.active_set.get(&value1.label().pubkey()).is_some()); @@ -609,7 +561,7 @@ mod test { 0, ))); assert!(push.active_set.get(&value2.label().pubkey()).is_none()); - assert_eq!(crds.insert(value2.clone(), now), Ok(None)); + assert_eq!(crds.insert(value2.clone(), now), Ok(())); for _ in 0..30 { push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); if push.active_set.get(&value2.label().pubkey()).is_some() { @@ -622,7 +574,7 @@ mod test { let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo( ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0), )); - assert_eq!(crds.insert(value2.clone(), now), Ok(None)); + assert_eq!(crds.insert(value2.clone(), now), Ok(())); } push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); assert_eq!(push.active_set.len(), push.num_active); @@ -780,7 +732,7 @@ mod test { &solana_sdk::pubkey::new_rand(), 0, ))); - 
assert_eq!(crds.insert(peer.clone(), now), Ok(None)); + assert_eq!(crds.insert(peer.clone(), now), Ok(())); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( @@ -791,7 +743,7 @@ mod test { expected.insert(peer.label().pubkey(), vec![new_msg.clone()]); assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), new_msg, 0), - Ok(None) + Ok(()) ); assert_eq!(push.active_set.len(), 1); assert_eq!(push.new_push_messages(&crds, 0), expected); @@ -799,36 +751,32 @@ mod test { #[test] fn test_personalized_push_messages() { let now = timestamp(); + let mut rng = rand::thread_rng(); let mut crds = Crds::default(); let mut push = CrdsGossipPush::default(); - let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); - assert_eq!(crds.insert(peer_1.clone(), now), Ok(None)); - let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); - assert_eq!(crds.insert(peer_2.clone(), now), Ok(None)); - let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - now, - ))); + let peers: Vec<_> = vec![0, 0, now] + .into_iter() + .map(|wallclock| { + let mut peer = ContactInfo::new_rand(&mut rng, /*pubkey=*/ None); + peer.wallclock = wallclock; + CrdsValue::new_unsigned(CrdsData::ContactInfo(peer)) + }) + .collect(); + assert_eq!(crds.insert(peers[0].clone(), now), Ok(())); + assert_eq!(crds.insert(peers[1].clone(), now), Ok(())); assert_eq!( - push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), now), - Ok(None) + push.process_push_message(&mut crds, &Pubkey::default(), peers[2].clone(), now), + Ok(()) ); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); // push 3's contact info to 1 
and 2 and 3 - let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &peer_3.pubkey(), - 0, - ))); - let mut expected = HashMap::new(); - expected.insert(peer_1.pubkey(), vec![new_msg.clone()]); - expected.insert(peer_2.pubkey(), vec![new_msg]); + let expected: HashMap<_, _> = vec![ + (peers[0].pubkey(), vec![peers[2].clone()]), + (peers[1].pubkey(), vec![peers[2].clone()]), + ] + .into_iter() + .collect(); assert_eq!(push.active_set.len(), 3); assert_eq!(push.new_push_messages(&crds, now), expected); } @@ -841,7 +789,7 @@ mod test { &solana_sdk::pubkey::new_rand(), 0, ))); - assert_eq!(crds.insert(peer.clone(), 0), Ok(None)); + assert_eq!(crds.insert(peer.clone(), 0), Ok(())); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( @@ -851,7 +799,7 @@ mod test { let expected = HashMap::new(); assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0), - Ok(None) + Ok(()) ); push.process_prune_msg( &self_id, @@ -868,7 +816,7 @@ mod test { &solana_sdk::pubkey::new_rand(), 0, ))); - assert_eq!(crds.insert(peer, 0), Ok(None)); + assert_eq!(crds.insert(peer, 0), Ok(())); push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1); let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0); @@ -877,9 +825,8 @@ mod test { let expected = HashMap::new(); assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), new_msg, 1), - Ok(None) + Ok(()) ); - push.purge_old_pending_push_messages(&crds, 0); assert_eq!(push.new_push_messages(&crds, 0), expected); } @@ -894,9 +841,9 @@ mod test { // push a new message assert_eq!( push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0), - Ok(None) + Ok(()) ); - assert_eq!(crds.lookup(&label), Some(&value)); + assert_eq!(crds.get(&label).unwrap().value, value); // push it again 
assert_matches!( diff --git a/core/src/crds_shards.rs b/core/src/crds_shards.rs index 8b0fc5df87..74bef44b85 100644 --- a/core/src/crds_shards.rs +++ b/core/src/crds_shards.rs @@ -130,19 +130,17 @@ where #[cfg(test)] mod test { use super::*; - use crate::contact_info::ContactInfo; - use crate::crds_value::{CrdsData, CrdsValue}; + use crate::{crds::Crds, crds_value::CrdsValue}; use rand::{thread_rng, Rng}; use solana_sdk::timing::timestamp; - use std::collections::HashSet; - use std::ops::Index; - - fn new_test_crds_value() -> VersionedCrdsValue { - let data = CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - timestamp(), - )); - VersionedCrdsValue::new(timestamp(), CrdsValue::new_unsigned(data)) + use std::{collections::HashSet, iter::repeat_with, ops::Index}; + + fn new_test_crds_value(rng: &mut R) -> VersionedCrdsValue { + let value = CrdsValue::new_rand(rng, None); + let label = value.label(); + let mut crds = Crds::default(); + crds.insert(value, timestamp()).unwrap(); + crds.get(&label).cloned().unwrap() } // Returns true if the first mask_bits most significant bits of hash is the @@ -176,7 +174,7 @@ mod test { fn test_crds_shards_round_trip() { let mut rng = thread_rng(); // Generate some random hash and crds value labels. - let mut values: Vec<_> = std::iter::repeat_with(new_test_crds_value) + let mut values: Vec<_> = repeat_with(|| new_test_crds_value(&mut rng)) .take(4096) .collect(); // Insert everything into the crds shards. diff --git a/core/src/crds_value.rs b/core/src/crds_value.rs index 9fc6e4bf81..5fce1a2286 100644 --- a/core/src/crds_value.rs +++ b/core/src/crds_value.rs @@ -20,6 +20,7 @@ use solana_sdk::{ use solana_vote_program::vote_transaction::parse_vote_transaction; use std::{ borrow::{Borrow, Cow}, + cmp::Ordering, collections::{hash_map::Entry, BTreeSet, HashMap}, fmt, }; @@ -405,7 +406,7 @@ impl NodeInstance { } // Clones the value with an updated wallclock. 
- pub fn with_wallclock(&self, now: u64) -> Self { + pub(crate) fn with_wallclock(&self, now: u64) -> Self { Self { wallclock: now, ..*self @@ -414,7 +415,7 @@ impl NodeInstance { // Returns true if the crds-value is a duplicate instance // of this node, with a more recent timestamp. - pub fn check_duplicate(&self, other: &CrdsValue) -> bool { + pub(crate) fn check_duplicate(&self, other: &CrdsValue) -> bool { match &other.data { CrdsData::NodeInstance(other) => { self.token != other.token @@ -424,6 +425,26 @@ impl NodeInstance { _ => false, } } + + // Returns None if tokens are the same or other is not a node-instance from + // the same owner. Otherwise returns true if self has more recent timestamp + // than other, and so overrides it. + pub(crate) fn overrides(&self, other: &CrdsValue) -> Option { + let other = match &other.data { + CrdsData::NodeInstance(other) => other, + _ => return None, + }; + if self.token == other.token || self.from != other.from { + return None; + } + match self.timestamp.cmp(&other.timestamp) { + Ordering::Less => Some(false), + Ordering::Greater => Some(true), + // Ties should be broken in a deterministic way across the cluster, + // so that nodes propagate the same value through gossip. 
+ Ordering::Equal => Some(other.token < self.token), + } + } } impl Sanitize for NodeInstance { @@ -445,7 +466,7 @@ pub enum CrdsValueLabel { AccountsHashes(Pubkey), LegacyVersion(Pubkey), Version(Pubkey), - NodeInstance(Pubkey, u64 /*token*/), + NodeInstance(Pubkey), DuplicateShred(DuplicateShredIndex, Pubkey), } @@ -460,7 +481,7 @@ impl fmt::Display for CrdsValueLabel { CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()), CrdsValueLabel::LegacyVersion(_) => write!(f, "LegacyVersion({})", self.pubkey()), CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()), - CrdsValueLabel::NodeInstance(pk, token) => write!(f, "NodeInstance({}, {})", pk, token), + CrdsValueLabel::NodeInstance(pk) => write!(f, "NodeInstance({})", pk), CrdsValueLabel::DuplicateShred(ix, pk) => write!(f, "DuplicateShred({}, {})", ix, pk), } } @@ -477,27 +498,10 @@ impl CrdsValueLabel { CrdsValueLabel::AccountsHashes(p) => *p, CrdsValueLabel::LegacyVersion(p) => *p, CrdsValueLabel::Version(p) => *p, - CrdsValueLabel::NodeInstance(p, _ /*token*/) => *p, + CrdsValueLabel::NodeInstance(p) => *p, CrdsValueLabel::DuplicateShred(_, p) => *p, } } - - /// Returns number of possible distinct labels of the same type for - /// a fixed pubkey, and None if that is practically unlimited. 
- pub(crate) fn value_space(&self) -> Option { - match self { - CrdsValueLabel::ContactInfo(_) => Some(1), - CrdsValueLabel::Vote(_, _) => Some(MAX_VOTES as usize), - CrdsValueLabel::LowestSlot(_) => Some(1), - CrdsValueLabel::SnapshotHashes(_) => Some(1), - CrdsValueLabel::EpochSlots(_, _) => Some(MAX_EPOCH_SLOTS as usize), - CrdsValueLabel::AccountsHashes(_) => Some(1), - CrdsValueLabel::LegacyVersion(_) => Some(1), - CrdsValueLabel::Version(_) => Some(1), - CrdsValueLabel::NodeInstance(_, _) => None, - CrdsValueLabel::DuplicateShred(_, _) => Some(MAX_DUPLICATE_SHREDS as usize), - } - } } impl CrdsValue { @@ -570,7 +574,7 @@ impl CrdsValue { CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()), CrdsData::LegacyVersion(_) => CrdsValueLabel::LegacyVersion(self.pubkey()), CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()), - CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(node.from, node.token), + CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(node.from), CrdsData::DuplicateShred(ix, shred) => CrdsValueLabel::DuplicateShred(*ix, shred.from), } } @@ -931,7 +935,7 @@ mod test { token: rng.gen(), ..node }; - assert_ne!( + assert_eq!( make_crds_value(node).label(), make_crds_value(other).label() ); @@ -946,20 +950,31 @@ mod test { let mut rng = rand::thread_rng(); let pubkey = Pubkey::new_unique(); let node = NodeInstance::new(&mut rng, pubkey, now); + let node_crds = make_crds_value(node.clone()); // Same token is not a duplicate. - assert!(!node.check_duplicate(&make_crds_value(NodeInstance { + let other = NodeInstance { from: pubkey, wallclock: now + 1, timestamp: now + 1, token: node.token, - }))); + }; + let other_crds = make_crds_value(other.clone()); + assert!(!node.check_duplicate(&other_crds)); + assert!(!other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), None); + assert_eq!(other.overrides(&node_crds), None); // Older timestamp is not a duplicate. 
- assert!(!node.check_duplicate(&make_crds_value(NodeInstance { + let other = NodeInstance { from: pubkey, wallclock: now + 1, timestamp: now - 1, token: rng.gen(), - }))); + }; + let other_crds = make_crds_value(other.clone()); + assert!(!node.check_duplicate(&other_crds)); + assert!(other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), Some(true)); + assert_eq!(other.overrides(&node_crds), Some(false)); // Updated wallclock is not a duplicate. let other = node.with_wallclock(now + 8); assert_eq!( @@ -971,27 +986,56 @@ mod test { token: node.token, } ); - assert!(!node.check_duplicate(&make_crds_value(other))); - // Duplicate instance. - assert!(node.check_duplicate(&make_crds_value(NodeInstance { - from: pubkey, - wallclock: 0, - timestamp: now, - token: rng.gen(), - }))); + let other_crds = make_crds_value(other.clone()); + assert!(!node.check_duplicate(&other_crds)); + assert!(!other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), None); + assert_eq!(other.overrides(&node_crds), None); + // Duplicate instance; tied timestamp. + for _ in 0..10 { + let other = NodeInstance { + from: pubkey, + wallclock: 0, + timestamp: now, + token: rng.gen(), + }; + let other_crds = make_crds_value(other.clone()); + assert!(node.check_duplicate(&other_crds)); + assert!(other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), Some(other.token < node.token)); + assert_eq!(other.overrides(&node_crds), Some(node.token < other.token)); + } + // Duplicate instance; more recent timestamp. + for _ in 0..10 { + let other = NodeInstance { + from: pubkey, + wallclock: 0, + timestamp: now + 1, + token: rng.gen(), + }; + let other_crds = make_crds_value(other.clone()); + assert!(node.check_duplicate(&other_crds)); + assert!(!other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), Some(false)); + assert_eq!(other.overrides(&node_crds), Some(true)); + } // Different pubkey is not a duplicate. 
- assert!(!node.check_duplicate(&make_crds_value(NodeInstance { + let other = NodeInstance { from: Pubkey::new_unique(), wallclock: now + 1, timestamp: now + 1, token: rng.gen(), - }))); + }; + let other_crds = make_crds_value(other.clone()); + assert!(!node.check_duplicate(&other_crds)); + assert!(!other.check_duplicate(&node_crds)); + assert_eq!(node.overrides(&other_crds), None); + assert_eq!(other.overrides(&node_crds), None); // Differnt crds value is not a duplicate. - assert!( - !node.check_duplicate(&CrdsValue::new_unsigned(CrdsData::ContactInfo( - ContactInfo::new_rand(&mut rng, Some(pubkey)) - ))) - ); + let other = ContactInfo::new_rand(&mut rng, Some(pubkey)); + let other = CrdsValue::new_unsigned(CrdsData::ContactInfo(other)); + assert!(!node.check_duplicate(&other)); + assert_eq!(node.overrides(&other), None); } #[test] diff --git a/core/src/duplicate_shred.rs b/core/src/duplicate_shred.rs index 47cec81db0..db67f08f6f 100644 --- a/core/src/duplicate_shred.rs +++ b/core/src/duplicate_shred.rs @@ -343,17 +343,9 @@ pub(crate) mod tests { fn test_duplicate_shred_round_trip() { let mut rng = rand::thread_rng(); let leader = Arc::new(Keypair::new()); - let (slot, parent_slot, fec_rate, reference_tick, version) = - (53084024, 53084023, 0.0, 0, 0); - let shredder = Shredder::new( - slot, - parent_slot, - fec_rate, - leader.clone(), - reference_tick, - version, - ) - .unwrap(); + let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0); + let shredder = + Shredder::new(slot, parent_slot, leader.clone(), reference_tick, version).unwrap(); let next_shred_index = rng.gen(); let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder); let shred2 = new_rand_shred(&mut rng, next_shred_index, &shredder); diff --git a/core/src/fork_choice.rs b/core/src/fork_choice.rs index 2f223b26a3..b5e6524359 100644 --- a/core/src/fork_choice.rs +++ b/core/src/fork_choice.rs @@ -1,5 +1,6 @@ use crate::{ - consensus::{ComputedBankState, 
SwitchForkDecision, Tower}, + consensus::{SwitchForkDecision, Tower}, + latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::ProgressMap, replay_stage::HeaviestForkFailures, }; @@ -16,12 +17,12 @@ pub(crate) struct SelectVoteAndResetForkResult { } pub(crate) trait ForkChoice { + type ForkChoiceKey; fn compute_bank_stats( &mut self, bank: &Bank, tower: &Tower, - progress: &mut ProgressMap, - computed_bank_state: &ComputedBankState, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, ); // Returns: @@ -36,4 +37,8 @@ pub(crate) trait ForkChoice { ancestors: &HashMap>, bank_forks: &RwLock, ) -> (Arc, Option>); + + fn mark_fork_invalid_candidate(&mut self, invalid_slot: &Self::ForkChoiceKey); + + fn mark_fork_valid_candidate(&mut self, valid_slot: &Self::ForkChoiceKey); } diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index 3bf4786629..0262dc5a3e 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -85,29 +85,33 @@ pub fn discover_cluster( entrypoint: &SocketAddr, num_nodes: usize, ) -> std::io::Result> { - discover( - None, + const DISCOVER_CLUSTER_TIMEOUT: Duration = Duration::from_secs(120); + let (_all_peers, validators) = discover( + None, // keypair Some(entrypoint), Some(num_nodes), - Some(30), - None, - None, - None, - 0, - ) - .map(|(_all_peers, validators)| validators) + DISCOVER_CLUSTER_TIMEOUT, + None, // find_node_by_pubkey + None, // find_node_by_gossip_addr + None, // my_gossip_addr + 0, // my_shred_version + )?; + Ok(validators) } pub fn discover( keypair: Option>, entrypoint: Option<&SocketAddr>, num_nodes: Option, // num_nodes only counts validators, excludes spy nodes - timeout: Option, + timeout: Duration, find_node_by_pubkey: Option, find_node_by_gossip_addr: Option<&SocketAddr>, my_gossip_addr: Option<&SocketAddr>, my_shred_version: u16, -) -> std::io::Result<(Vec, Vec)> { +) -> std::io::Result<( + Vec, // all gossip peers + Vec, // 
tvu peers (validators) +)> { let keypair = keypair.unwrap_or_else(|| Arc::new(Keypair::new())); let exit = Arc::new(AtomicBool::new(false)); @@ -129,7 +133,7 @@ pub fn discover( let _ip_echo_server = ip_echo.map(solana_net_utils::ip_echo_server); - let (met_criteria, secs, all_peers, tvu_peers) = spy( + let (met_criteria, elapsed, all_peers, tvu_peers) = spy( spy_ref.clone(), num_nodes, timeout, @@ -143,7 +147,7 @@ pub fn discover( if met_criteria { info!( "discover success in {}s...\n{}", - secs, + elapsed.as_secs(), spy_ref.contact_info_trace() ); return Ok((all_peers, tvu_peers)); @@ -205,22 +209,21 @@ pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) { fn spy( spy_ref: Arc, num_nodes: Option, - timeout: Option, + timeout: Duration, find_node_by_pubkey: Option, find_node_by_gossip_addr: Option<&SocketAddr>, -) -> (bool, u64, Vec, Vec) { +) -> ( + bool, // if found the specified nodes + Duration, // elapsed time until found the nodes or timed-out + Vec, // all gossip peers + Vec, // tvu peers (validators) +) { let now = Instant::now(); let mut met_criteria = false; let mut all_peers: Vec = Vec::new(); let mut tvu_peers: Vec = Vec::new(); let mut i = 1; - while !met_criteria { - if let Some(secs) = timeout { - if now.elapsed() >= Duration::from_secs(secs) { - break; - } - } - + while !met_criteria && now.elapsed() < timeout { all_peers = spy_ref .all_peers() .into_iter() @@ -266,7 +269,7 @@ fn spy( )); i += 1; } - (met_criteria, now.elapsed().as_secs(), all_peers, tvu_peers) + (met_criteria, now.elapsed(), all_peers, tvu_peers) } /// Makes a spy or gossip node based on whether or not a gossip_addr was passed in @@ -329,6 +332,7 @@ mod tests { #[test] fn test_gossip_services_spy() { + const TIMEOUT: Duration = Duration::from_secs(5); let keypair = Keypair::new(); let peer0 = solana_sdk::pubkey::new_rand(); let peer1 = solana_sdk::pubkey::new_rand(); @@ -341,52 +345,57 @@ mod tests { let spy_ref = Arc::new(cluster_info); - let (met_criteria, 
secs, _, tvu_peers) = spy(spy_ref.clone(), None, Some(1), None, None); - assert_eq!(met_criteria, false); - assert_eq!(secs, 1); + let (met_criteria, elapsed, _, tvu_peers) = spy(spy_ref.clone(), None, TIMEOUT, None, None); + assert!(!met_criteria); + assert!((TIMEOUT..TIMEOUT + Duration::from_secs(1)).contains(&elapsed)); assert_eq!(tvu_peers, spy_ref.tvu_peers()); // Find num_nodes - let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None); - assert_eq!(met_criteria, true); - let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None); - assert_eq!(met_criteria, true); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), TIMEOUT, None, None); + assert!(met_criteria); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(2), TIMEOUT, None, None); + assert!(met_criteria); // Find specific node by pubkey - let (met_criteria, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None); - assert_eq!(met_criteria, true); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), None, TIMEOUT, Some(peer0), None); + assert!(met_criteria); let (met_criteria, _, _, _) = spy( spy_ref.clone(), None, - Some(0), + TIMEOUT, Some(solana_sdk::pubkey::new_rand()), None, ); assert_eq!(met_criteria, false); // Find num_nodes *and* specific node by pubkey - let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None); - assert_eq!(met_criteria, true); - let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None); - assert_eq!(met_criteria, false); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), TIMEOUT, Some(peer0), None); + assert!(met_criteria); + let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(3), TIMEOUT, Some(peer0), None); + assert!(!met_criteria); let (met_criteria, _, _, _) = spy( spy_ref.clone(), Some(1), - Some(0), + TIMEOUT, Some(solana_sdk::pubkey::new_rand()), None, ); assert_eq!(met_criteria, false); // Find specific node by gossip address - let 
(met_criteria, _, _, _) = - spy(spy_ref.clone(), None, None, None, Some(&peer0_info.gossip)); - assert_eq!(met_criteria, true); + let (met_criteria, _, _, _) = spy( + spy_ref.clone(), + None, + TIMEOUT, + None, + Some(&peer0_info.gossip), + ); + assert!(met_criteria); let (met_criteria, _, _, _) = spy( spy_ref, None, - Some(0), + TIMEOUT, None, Some(&"1.1.1.1:1234".parse().unwrap()), ); diff --git a/core/src/heaviest_subtree_fork_choice.rs b/core/src/heaviest_subtree_fork_choice.rs index 236980e973..538ed432c5 100644 --- a/core/src/heaviest_subtree_fork_choice.rs +++ b/core/src/heaviest_subtree_fork_choice.rs @@ -1,17 +1,19 @@ use crate::{ - consensus::{ComputedBankState, Tower}, - fork_choice::ForkChoice, - progress_map::ProgressMap, - tree_diff::TreeDiff, + consensus::Tower, fork_choice::ForkChoice, + latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, + progress_map::ProgressMap, tree_diff::TreeDiff, }; +use solana_measure::measure::Measure; use solana_runtime::{bank::Bank, bank_forks::BankForks, epoch_stakes::EpochStakes}; use solana_sdk::{ clock::{Epoch, Slot}, epoch_schedule::EpochSchedule, + hash::Hash, pubkey::Pubkey, }; use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, + borrow::Borrow, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, sync::{Arc, RwLock}, time::Instant, }; @@ -19,20 +21,41 @@ use std::{ use trees::{Tree, TreeWalk}; pub type ForkWeight = u64; +pub type SlotHashKey = (Slot, Hash); +type UpdateOperations = BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>; + const MAX_ROOT_PRINT_SECONDS: u64 = 30; #[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] enum UpdateLabel { Aggregate, Add, + MarkValid, Subtract, } +pub trait GetSlotHash { + fn slot_hash(&self) -> SlotHashKey; +} + +impl GetSlotHash for SlotHashKey { + fn slot_hash(&self) -> SlotHashKey { + *self + } +} + +impl GetSlotHash for Slot { + fn slot_hash(&self) -> SlotHashKey { + (*self, Hash::default()) + } +} + 
#[derive(PartialEq, Eq, Clone, Debug)] enum UpdateOperation { - Aggregate, Add(u64), + MarkValid, Subtract(u64), + Aggregate, } impl UpdateOperation { @@ -40,6 +63,7 @@ impl UpdateOperation { match self { Self::Aggregate => panic!("Should not get here"), Self::Add(stake) => *stake += new_stake, + Self::MarkValid => panic!("Should not get here"), Self::Subtract(stake) => *stake += new_stake, } } @@ -53,20 +77,23 @@ struct ForkInfo { stake_voted_subtree: ForkWeight, // Best slot in the subtree rooted at this slot, does not // have to be a direct child in `children` - best_slot: Slot, - parent: Option, - children: Vec, + best_slot: SlotHashKey, + parent: Option, + children: Vec, + // Whether the fork rooted at this slot is a valid contender + // for the best fork + is_candidate: bool, } pub struct HeaviestSubtreeForkChoice { - fork_infos: HashMap, - latest_votes: HashMap, - root: Slot, + fork_infos: HashMap, + latest_votes: HashMap, + root: SlotHashKey, last_root_time: Instant, } impl HeaviestSubtreeForkChoice { - pub(crate) fn new(root: Slot) -> Self { + pub(crate) fn new(root: SlotHashKey) -> Self { let mut heaviest_subtree_fork_choice = Self { root, // Doesn't implement default because `root` must @@ -81,17 +108,23 @@ impl HeaviestSubtreeForkChoice { // Given a root and a list of `frozen_banks` sorted smallest to greatest by slot, // return a new HeaviestSubtreeForkChoice - pub(crate) fn new_from_frozen_banks(root: Slot, frozen_banks: &[Arc]) -> Self { + pub(crate) fn new_from_frozen_banks(root: SlotHashKey, frozen_banks: &[Arc]) -> Self { let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(root); - let mut prev_slot = root; + let mut prev_slot = root.0; for bank in frozen_banks.iter() { assert!(bank.is_frozen()); - if bank.slot() > root { + if bank.slot() > root.0 { // Make sure the list is sorted assert!(bank.slot() > prev_slot); prev_slot = bank.slot(); - heaviest_subtree_fork_choice - .add_new_leaf_slot(bank.slot(), Some(bank.parent_slot())); 
+ let bank_hash = bank.hash(); + assert_ne!(bank_hash, Hash::default()); + let parent_bank_hash = bank.parent_hash(); + assert_ne!(parent_bank_hash, Hash::default()); + heaviest_subtree_fork_choice.add_new_leaf_slot( + (bank.slot(), bank_hash), + Some((bank.parent_slot(), parent_bank_hash)), + ); } } @@ -103,52 +136,65 @@ impl HeaviestSubtreeForkChoice { let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect(); frozen_banks.sort_by_key(|bank| bank.slot()); - let root = bank_forks.root(); - Self::new_from_frozen_banks(root, &frozen_banks) + let root_bank = bank_forks.root_bank(); + Self::new_from_frozen_banks((root_bank.slot(), root_bank.hash()), &frozen_banks) } #[cfg(test)] - pub(crate) fn new_from_tree(forks: Tree) -> Self { - let root = forks.root().data; + pub(crate) fn new_from_tree(forks: Tree) -> Self { + let root = forks.root().data.slot_hash(); let mut walk = TreeWalk::from(forks); let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(root); while let Some(visit) = walk.get() { - let slot = visit.node().data; - if heaviest_subtree_fork_choice.fork_infos.contains_key(&slot) { + let slot_hash = visit.node().data.slot_hash(); + if heaviest_subtree_fork_choice + .fork_infos + .contains_key(&slot_hash) + { walk.forward(); continue; } - let parent = walk.get_parent().map(|n| n.data); - heaviest_subtree_fork_choice.add_new_leaf_slot(slot, parent); + let parent_slot_hash = walk.get_parent().map(|n| n.data.slot_hash()); + heaviest_subtree_fork_choice.add_new_leaf_slot(slot_hash, parent_slot_hash); walk.forward(); } heaviest_subtree_fork_choice } - pub fn best_slot(&self, slot: Slot) -> Option { + pub fn contains_block(&self, key: &SlotHashKey) -> bool { + self.fork_infos.contains_key(key) + } + + pub fn best_slot(&self, key: &SlotHashKey) -> Option { self.fork_infos - .get(&slot) + .get(key) .map(|fork_info| fork_info.best_slot) } - pub fn best_overall_slot(&self) -> Slot { - self.best_slot(self.root).unwrap() + pub fn 
best_overall_slot(&self) -> SlotHashKey { + self.best_slot(&self.root).unwrap() } - pub fn stake_voted_subtree(&self, slot: Slot) -> Option { + pub fn stake_voted_subtree(&self, key: &SlotHashKey) -> Option { self.fork_infos - .get(&slot) + .get(key) .map(|fork_info| fork_info.stake_voted_subtree) } - pub fn root(&self) -> Slot { + pub fn is_candidate_slot(&self, key: &SlotHashKey) -> Option { + self.fork_infos + .get(key) + .map(|fork_info| fork_info.is_candidate) + } + + pub fn root(&self) -> SlotHashKey { self.root } - pub fn max_by_weight(&self, slot1: Slot, slot2: Slot) -> std::cmp::Ordering { - let weight1 = self.stake_voted_subtree(slot1).unwrap(); - let weight2 = self.stake_voted_subtree(slot2).unwrap(); + pub fn max_by_weight(&self, slot1: SlotHashKey, slot2: SlotHashKey) -> std::cmp::Ordering { + let weight1 = self.stake_voted_subtree(&slot1).unwrap(); + let weight2 = self.stake_voted_subtree(&slot2).unwrap(); if weight1 == weight2 { slot1.cmp(&slot2).reverse() } else { @@ -157,41 +203,42 @@ impl HeaviestSubtreeForkChoice { } // Add new votes, returns the best slot - pub fn add_votes( - &mut self, + pub fn add_votes<'a, 'b>( + &'a mut self, // newly updated votes on a fork - pubkey_votes: &[(Pubkey, Slot)], + pubkey_votes: impl Iterator + 'b>, epoch_stakes: &HashMap, epoch_schedule: &EpochSchedule, - ) -> Slot { + ) -> SlotHashKey { // Generate the set of updates - let update_operations = + let update_operations_batch = self.generate_update_operations(pubkey_votes, epoch_stakes, epoch_schedule); // Finalize all updates - self.process_update_operations(update_operations); + self.process_update_operations(update_operations_batch); self.best_overall_slot() } - pub fn set_root(&mut self, new_root: Slot) { + pub fn set_root(&mut self, new_root: SlotHashKey) { // Remove everything reachable from `self.root` but not `new_root`, // as those are now unrooted. 
let remove_set = self.subtree_diff(self.root, new_root); - for slot in remove_set { + for node_key in remove_set { self.fork_infos - .remove(&slot) + .remove(&node_key) .expect("Slots reachable from old root must exist in tree"); } - self.fork_infos - .get_mut(&new_root) - .expect("new root must exist in fork_infos map") + let root_fork_info = self.fork_infos.get_mut(&new_root); + + root_fork_info + .unwrap_or_else(|| panic!("New root: {:?}, didn't exist in fork choice", new_root)) .parent = None; self.root = new_root; self.last_root_time = Instant::now(); } - pub fn add_root_parent(&mut self, root_parent: Slot) { - assert!(root_parent < self.root); + pub fn add_root_parent(&mut self, root_parent: SlotHashKey) { + assert!(root_parent.0 < self.root.0); assert!(self.fork_infos.get(&root_parent).is_none()); let root_info = self .fork_infos @@ -205,17 +252,24 @@ impl HeaviestSubtreeForkChoice { best_slot: root_info.best_slot, children: vec![self.root], parent: None, + is_candidate: true, }; self.fork_infos.insert(root_parent, root_parent_info); self.root = root_parent; } - pub fn add_new_leaf_slot(&mut self, slot: Slot, parent: Option) { + pub fn add_new_leaf_slot(&mut self, slot: SlotHashKey, parent: Option) { if self.last_root_time.elapsed().as_secs() > MAX_ROOT_PRINT_SECONDS { self.print_state(); self.last_root_time = Instant::now(); } + if self.fork_infos.contains_key(&slot) { + // Can potentially happen if we repair the same version of the duplicate slot, after + // dumping the original version + return; + } + self.fork_infos .entry(slot) .and_modify(|slot_info| slot_info.parent = parent) @@ -226,6 +280,7 @@ impl HeaviestSubtreeForkChoice { best_slot: slot, children: vec![], parent, + is_candidate: true, }); if parent.is_none() { @@ -243,24 +298,33 @@ impl HeaviestSubtreeForkChoice { // Propagate leaf up the tree to any ancestors who considered the previous leaf // the `best_slot` - self.propagate_new_leaf(slot, parent) + self.propagate_new_leaf(&slot, &parent) } 
// Returns if the given `maybe_best_child` is the heaviest among the children // it's parent - fn is_best_child(&self, maybe_best_child: Slot) -> bool { + fn is_best_child(&self, maybe_best_child: &SlotHashKey) -> bool { let maybe_best_child_weight = self.stake_voted_subtree(maybe_best_child).unwrap(); let parent = self.parent(maybe_best_child); // If there's no parent, this must be the root if parent.is_none() { return true; } - for child in self.children(parent.unwrap()).unwrap() { + for child in self.children(&parent.unwrap()).unwrap() { let child_weight = self - .stake_voted_subtree(*child) + .stake_voted_subtree(child) .expect("child must exist in `self.fork_infos`"); + + // Don't count children currently marked as invalid + if !self + .is_candidate_slot(child) + .expect("child must exist in tree") + { + continue; + } + if child_weight > maybe_best_child_weight - || (maybe_best_child_weight == child_weight && *child < maybe_best_child) + || (maybe_best_child_weight == child_weight && *child < *maybe_best_child) { return false; } @@ -268,76 +332,72 @@ impl HeaviestSubtreeForkChoice { true } - pub fn all_slots_stake_voted_subtree(&self) -> Vec<(Slot, u64)> { + + pub fn all_slots_stake_voted_subtree(&self) -> impl Iterator { self.fork_infos .iter() - .map(|(slot, fork_info)| (*slot, fork_info.stake_voted_subtree)) - .collect() + .map(|(slot_hash, fork_info)| (slot_hash, fork_info.stake_voted_subtree)) } - pub fn ancestors(&self, start_slot: Slot) -> Vec { - AncestorIterator::new(start_slot, &self.fork_infos).collect() + #[cfg(test)] + pub fn ancestors(&self, start_slot_hash_key: SlotHashKey) -> Vec { + AncestorIterator::new(start_slot_hash_key, &self.fork_infos).collect() } pub fn merge( &mut self, other: HeaviestSubtreeForkChoice, - merge_leaf: Slot, + merge_leaf: &SlotHashKey, epoch_stakes: &HashMap, epoch_schedule: &EpochSchedule, ) { - assert!(self.fork_infos.contains_key(&merge_leaf)); + assert!(self.fork_infos.contains_key(merge_leaf)); // Add all the nodes 
from `other` into our tree let mut other_slots_nodes: Vec<_> = other .fork_infos .iter() - .map(|(slot, fork_info)| (slot, fork_info.parent.unwrap_or(merge_leaf))) + .map(|(slot_hash_key, fork_info)| { + (slot_hash_key, fork_info.parent.unwrap_or(*merge_leaf)) + }) .collect(); - other_slots_nodes.sort_by_key(|(slot, _)| *slot); - for (slot, parent) in other_slots_nodes { - self.add_new_leaf_slot(*slot, Some(parent)); + other_slots_nodes.sort_by_key(|(slot_hash_key, _)| *slot_hash_key); + for (slot_hash_key, parent) in other_slots_nodes { + self.add_new_leaf_slot(*slot_hash_key, Some(parent)); } - // Add all the latest votes from `other` that are newer than the ones - // in the current tree - let new_votes: Vec<_> = other - .latest_votes - .into_iter() - .filter(|(pk, other_latest_slot)| { - self.latest_votes - .get(&pk) - .map(|latest_slot| other_latest_slot > latest_slot) - .unwrap_or(false) - }) - .collect(); - - self.add_votes(&new_votes, epoch_stakes, epoch_schedule); + // Add all votes, the outdated ones should be filtered out by + // self.add_votes() + self.add_votes(other.latest_votes.into_iter(), epoch_stakes, epoch_schedule); } - pub fn stake_voted_at(&self, slot: Slot) -> Option { + pub fn stake_voted_at(&self, slot: &SlotHashKey) -> Option { self.fork_infos - .get(&slot) + .get(slot) .map(|fork_info| fork_info.stake_voted_at) } - fn propagate_new_leaf(&mut self, slot: Slot, parent: Slot) { - let parent_best_slot = self - .best_slot(parent) + fn propagate_new_leaf( + &mut self, + slot_hash_key: &SlotHashKey, + parent_slot_hash_key: &SlotHashKey, + ) { + let parent_best_slot_hash_key = self + .best_slot(parent_slot_hash_key) .expect("parent must exist in self.fork_infos after its child leaf was created"); // If this new leaf is the direct parent's best child, then propagate // it up the tree - if self.is_best_child(slot) { - let mut ancestor = Some(parent); + if self.is_best_child(slot_hash_key) { + let mut ancestor = Some(*parent_slot_hash_key); loop { if 
ancestor.is_none() { break; } let ancestor_fork_info = self.fork_infos.get_mut(&ancestor.unwrap()).unwrap(); - if ancestor_fork_info.best_slot == parent_best_slot { - ancestor_fork_info.best_slot = slot; + if ancestor_fork_info.best_slot == parent_best_slot_hash_key { + ancestor_fork_info.best_slot = *slot_hash_key; } else { break; } @@ -346,44 +406,93 @@ impl HeaviestSubtreeForkChoice { } } - #[allow(clippy::map_entry)] + fn insert_mark_valid_aggregate_operations( + &self, + update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, + slot_hash_key: SlotHashKey, + ) { + self.do_insert_aggregate_operations(update_operations, true, slot_hash_key); + } + fn insert_aggregate_operations( &self, - update_operations: &mut BTreeMap<(Slot, UpdateLabel), UpdateOperation>, - slot: Slot, + update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, + slot_hash_key: SlotHashKey, + ) { + self.do_insert_aggregate_operations(update_operations, false, slot_hash_key); + } + + #[allow(clippy::map_entry)] + fn do_insert_aggregate_operations( + &self, + update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>, + should_mark_valid: bool, + slot_hash_key: SlotHashKey, ) { - for parent in self.ancestor_iterator(slot) { - let label = (parent, UpdateLabel::Aggregate); - if update_operations.contains_key(&label) { + for parent_slot_hash_key in self.ancestor_iterator(slot_hash_key) { + let aggregate_label = (parent_slot_hash_key, UpdateLabel::Aggregate); + if update_operations.contains_key(&aggregate_label) { break; } else { - update_operations.insert(label, UpdateOperation::Aggregate); + if should_mark_valid { + update_operations.insert( + (parent_slot_hash_key, UpdateLabel::MarkValid), + UpdateOperation::MarkValid, + ); + } + update_operations.insert(aggregate_label, UpdateOperation::Aggregate); } } } - fn ancestor_iterator(&self, start_slot: Slot) -> AncestorIterator { - AncestorIterator::new(start_slot, &self.fork_infos) + fn 
ancestor_iterator(&self, start_slot_hash_key: SlotHashKey) -> AncestorIterator { + AncestorIterator::new(start_slot_hash_key, &self.fork_infos) } - fn aggregate_slot(&mut self, slot: Slot) { + fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) { let mut stake_voted_subtree; - let mut best_slot = slot; - if let Some(fork_info) = self.fork_infos.get(&slot) { + let mut best_slot_hash_key = slot_hash_key; + if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) { stake_voted_subtree = fork_info.stake_voted_at; let mut best_child_stake_voted_subtree = 0; - let mut best_child_slot = slot; - for &child in &fork_info.children { + let mut best_child_slot = slot_hash_key; + for child in &fork_info.children { let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap(); + + // Child forks that are not candidates still contribute to the weight + // of the subtree rooted at `slot_hash_key`. For instance: + /* + Build fork structure: + slot 0 + | + slot 1 + / \ + slot 2 | + | slot 3 (34%) + slot 4 (66%) + + If slot 4 is a duplicate slot, so no longer qualifies as a candidate until + the slot is confirmed, the weight of votes on slot 4 should still count towards + slot 2, otherwise we might pick slot 3 as the heaviest fork to build blocks on + instead of slot 2. + */ + + // See comment above for why this check is outside of the `is_candidate` check. stake_voted_subtree += child_stake_voted_subtree; - if best_child_slot == slot || - child_stake_voted_subtree > best_child_stake_voted_subtree || - // tiebreaker by slot height, prioritize earlier slot - (child_stake_voted_subtree == best_child_stake_voted_subtree && child < best_child_slot) + + // Note: If there's no valid children, then the best slot should default to the + // input `slot` itself. 
+ if self + .is_candidate_slot(child) + .expect("Child must exist in fork_info map") + && (best_child_slot == slot_hash_key || + child_stake_voted_subtree > best_child_stake_voted_subtree || + // tiebreaker by slot height, prioritize earlier slot + (child_stake_voted_subtree == best_child_stake_voted_subtree && child < &best_child_slot)) { best_child_stake_voted_subtree = child_stake_voted_subtree; - best_child_slot = child; - best_slot = self + best_child_slot = *child; + best_slot_hash_key = self .best_slot(child) .expect("`child` must exist in `self.fork_infos`"); } @@ -392,112 +501,192 @@ impl HeaviestSubtreeForkChoice { return; } - let fork_info = self.fork_infos.get_mut(&slot).unwrap(); + let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap(); fork_info.stake_voted_subtree = stake_voted_subtree; - fork_info.best_slot = best_slot; + fork_info.best_slot = best_slot_hash_key; } - fn generate_update_operations( - &mut self, - pubkey_votes: &[(Pubkey, Slot)], + fn mark_slot_valid(&mut self, valid_slot_hash_key: (Slot, Hash)) { + if let Some(fork_info) = self.fork_infos.get_mut(&valid_slot_hash_key) { + if !fork_info.is_candidate { + info!( + "marked previously invalid fork starting at slot: {:?} as valid", + valid_slot_hash_key + ); + } + fork_info.is_candidate = true; + } + } + + fn generate_update_operations<'a, 'b>( + &'a mut self, + pubkey_votes: impl Iterator + 'b>, epoch_stakes: &HashMap, epoch_schedule: &EpochSchedule, - ) -> BTreeMap<(Slot, UpdateLabel), UpdateOperation> { - let mut update_operations: BTreeMap<(Slot, UpdateLabel), UpdateOperation> = BTreeMap::new(); - + ) -> UpdateOperations { + let mut update_operations: BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation> = + BTreeMap::new(); + let mut observed_pubkeys: HashMap = HashMap::new(); // Sort the `pubkey_votes` in a BTreeMap by the slot voted - for &(pubkey, new_vote_slot) in pubkey_votes.iter() { - let pubkey_latest_vote = self.latest_votes.get(&pubkey).unwrap_or(&0); - // Filter 
out any votes or slots <= any slot this pubkey has - // already voted for, we only care about the latest votes - if new_vote_slot <= *pubkey_latest_vote { + for pubkey_vote in pubkey_votes { + let (pubkey, new_vote_slot_hash) = pubkey_vote.borrow(); + let (new_vote_slot, new_vote_hash) = *new_vote_slot_hash; + if new_vote_slot < self.root.0 { + // If the new vote is less than the root we can ignore it. This is because there + // are two cases. Either: + // 1) The validator's latest vote was bigger than the new vote, so we can ignore it + // 2) The validator's latest vote was less than the new vote, then the validator's latest + // vote was also less than root. This means either every node in the current tree has the + // validators stake counted toward it (if the latest vote was an ancestor of the current root), + // OR every node doesn't have this validator's vote counting toward it (if the latest vote + // was not an ancestor of the current root). Thus this validator is essentially a no-op + // and won't affect fork choice. continue; } - // Remove this pubkey stake from previous fork - if let Some(old_latest_vote_slot) = self.latest_votes.insert(pubkey, new_vote_slot) { - let epoch = epoch_schedule.get_epoch(old_latest_vote_slot); - let stake_update = epoch_stakes - .get(&epoch) - .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey)) - .unwrap_or(0); - if stake_update > 0 { - update_operations - .entry((old_latest_vote_slot, UpdateLabel::Subtract)) - .and_modify(|update| update.update_stake(stake_update)) - .or_insert(UpdateOperation::Subtract(stake_update)); - self.insert_aggregate_operations(&mut update_operations, old_latest_vote_slot); + // A pubkey cannot appear in pubkey votes more than once. 
+ match observed_pubkeys.entry(*pubkey) { + Entry::Occupied(_occupied_entry) => { + panic!("Should not get multiple votes for same pubkey in the same batch"); + } + Entry::Vacant(vacant_entry) => { + vacant_entry.insert(new_vote_slot); + false + } + }; + + let mut pubkey_latest_vote = self.latest_votes.get_mut(pubkey); + + // Filter out any votes or slots < any slot this pubkey has + // already voted for, we only care about the latest votes. + // + // If the new vote is for the same slot, but a different, smaller hash, + // then allow processing to continue as this is a duplicate version + // of the same slot. + match pubkey_latest_vote.as_mut() { + Some((pubkey_latest_vote_slot, pubkey_latest_vote_hash)) + if (new_vote_slot < *pubkey_latest_vote_slot) + || (new_vote_slot == *pubkey_latest_vote_slot + && &new_vote_hash >= pubkey_latest_vote_hash) => + { + continue; + } + + _ => { + // We either: + // 1) don't have a vote yet for this pubkey, + // 2) or the new vote slot is bigger than the old vote slot + // 3) or the new vote slot == old_vote slot, but for a smaller bank hash. 
+ // In all above cases, we need to remove this pubkey stake from the previous fork + // of the previous vote + + if let Some((old_latest_vote_slot, old_latest_vote_hash)) = + self.latest_votes.insert(*pubkey, *new_vote_slot_hash) + { + assert!(if new_vote_slot == old_latest_vote_slot { + warn!( + "Got a duplicate vote for + validator: {}, + slot_hash: {:?}", + pubkey, new_vote_slot_hash + ); + // If the slots are equal, then the new + // vote must be for a smaller hash + new_vote_hash < old_latest_vote_hash + } else { + new_vote_slot > old_latest_vote_slot + }); + + let epoch = epoch_schedule.get_epoch(old_latest_vote_slot); + let stake_update = epoch_stakes + .get(&epoch) + .map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey)) + .unwrap_or(0); + + if stake_update > 0 { + update_operations + .entry(( + (old_latest_vote_slot, old_latest_vote_hash), + UpdateLabel::Subtract, + )) + .and_modify(|update| update.update_stake(stake_update)) + .or_insert(UpdateOperation::Subtract(stake_update)); + self.insert_aggregate_operations( + &mut update_operations, + (old_latest_vote_slot, old_latest_vote_hash), + ); + } + } } } // Add this pubkey stake to new fork - let epoch = epoch_schedule.get_epoch(new_vote_slot); + let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0); let stake_update = epoch_stakes .get(&epoch) .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey)) .unwrap_or(0); + update_operations - .entry((new_vote_slot, UpdateLabel::Add)) + .entry((*new_vote_slot_hash, UpdateLabel::Add)) .and_modify(|update| update.update_stake(stake_update)) .or_insert(UpdateOperation::Add(stake_update)); - self.insert_aggregate_operations(&mut update_operations, new_vote_slot); + self.insert_aggregate_operations(&mut update_operations, *new_vote_slot_hash); } update_operations } - fn process_update_operations( - &mut self, - update_operations: BTreeMap<(Slot, UpdateLabel), UpdateOperation>, - ) { + fn process_update_operations(&mut self, update_operations: 
UpdateOperations) { // Iterate through the update operations from greatest to smallest slot - for ((slot, _), operation) in update_operations.into_iter().rev() { + for ((slot_hash_key, _), operation) in update_operations.into_iter().rev() { match operation { - UpdateOperation::Aggregate => self.aggregate_slot(slot), - UpdateOperation::Add(stake) => self.add_slot_stake(slot, stake), - UpdateOperation::Subtract(stake) => self.subtract_slot_stake(slot, stake), + UpdateOperation::MarkValid => self.mark_slot_valid(slot_hash_key), + UpdateOperation::Aggregate => self.aggregate_slot(slot_hash_key), + UpdateOperation::Add(stake) => self.add_slot_stake(&slot_hash_key, stake), + UpdateOperation::Subtract(stake) => self.subtract_slot_stake(&slot_hash_key, stake), } } } - fn add_slot_stake(&mut self, slot: Slot, stake: u64) { - if let Some(fork_info) = self.fork_infos.get_mut(&slot) { + fn add_slot_stake(&mut self, slot_hash_key: &SlotHashKey, stake: u64) { + if let Some(fork_info) = self.fork_infos.get_mut(slot_hash_key) { fork_info.stake_voted_at += stake; fork_info.stake_voted_subtree += stake; } } - fn subtract_slot_stake(&mut self, slot: Slot, stake: u64) { - if let Some(fork_info) = self.fork_infos.get_mut(&slot) { + fn subtract_slot_stake(&mut self, slot_hash_key: &SlotHashKey, stake: u64) { + if let Some(fork_info) = self.fork_infos.get_mut(slot_hash_key) { fork_info.stake_voted_at -= stake; fork_info.stake_voted_subtree -= stake; } } - fn parent(&self, slot: Slot) -> Option { + fn parent(&self, slot_hash_key: &SlotHashKey) -> Option { self.fork_infos - .get(&slot) + .get(slot_hash_key) .map(|fork_info| fork_info.parent) .unwrap_or(None) } fn print_state(&self) { - let best_slot = self.best_overall_slot(); - let mut best_path: VecDeque<_> = self.ancestor_iterator(best_slot).collect(); - best_path.push_front(best_slot); + let best_slot_hash_key = self.best_overall_slot(); + let mut best_path: VecDeque<_> = self.ancestor_iterator(best_slot_hash_key).collect(); + 
best_path.push_front(best_slot_hash_key); info!( "Latest known votes by vote pubkey: {:#?}, best path: {:?}", self.latest_votes, - best_path.iter().rev().collect::>() + best_path.iter().rev().collect::>() ); } - fn heaviest_slot_on_same_voted_fork(&self, tower: &Tower) -> Option { + fn heaviest_slot_on_same_voted_fork(&self, tower: &Tower) -> Option { tower - .last_voted_slot() - .map(|last_voted_slot| { - let heaviest_slot_on_same_voted_fork = self.best_slot(last_voted_slot); - if heaviest_slot_on_same_voted_fork.is_none() { + .last_voted_slot_hash() + .and_then(|last_voted_slot_hash| { + let heaviest_slot_hash_on_same_voted_fork = self.best_slot(&last_voted_slot_hash); + if heaviest_slot_hash_on_same_voted_fork.is_none() { if !tower.is_stray_last_vote() { // Unless last vote is stray and stale, self.bast_slot(last_voted_slot) must return // Some(_), justifying to panic! here. @@ -508,9 +697,9 @@ impl HeaviestSubtreeForkChoice { // validator has been running, so we must be able to fetch best_slots for all of // them. 
panic!( - "a bank at last_voted_slot({}) is a frozen bank so must have been \ + "a bank at last_voted_slot({:?}) is a frozen bank so must have been \ added to heaviest_subtree_fork_choice at time of freezing", - last_voted_slot, + last_voted_slot_hash, ) } else { // fork_infos doesn't have corresponding data for the stale stray last vote, @@ -519,61 +708,73 @@ impl HeaviestSubtreeForkChoice { return None; } } - let heaviest_slot_on_same_voted_fork = heaviest_slot_on_same_voted_fork.unwrap(); + let heaviest_slot_hash_on_same_voted_fork = + heaviest_slot_hash_on_same_voted_fork.unwrap(); - if heaviest_slot_on_same_voted_fork == last_voted_slot { + if heaviest_slot_hash_on_same_voted_fork == last_voted_slot_hash { None } else { - Some(heaviest_slot_on_same_voted_fork) + Some(heaviest_slot_hash_on_same_voted_fork) } }) - .unwrap_or(None) } #[cfg(test)] - fn set_stake_voted_at(&mut self, slot: Slot, stake_voted_at: u64) { - self.fork_infos.get_mut(&slot).unwrap().stake_voted_at = stake_voted_at; + fn set_stake_voted_at(&mut self, slot_hash_key: SlotHashKey, stake_voted_at: u64) { + self.fork_infos + .get_mut(&slot_hash_key) + .unwrap() + .stake_voted_at = stake_voted_at; } #[cfg(test)] - fn is_leaf(&self, slot: Slot) -> bool { - self.fork_infos.get(&slot).unwrap().children.is_empty() + fn is_leaf(&self, slot_hash_key: SlotHashKey) -> bool { + self.fork_infos + .get(&slot_hash_key) + .unwrap() + .children + .is_empty() } } impl TreeDiff for HeaviestSubtreeForkChoice { - fn contains_slot(&self, slot: Slot) -> bool { - self.fork_infos.contains_key(&slot) + type TreeKey = SlotHashKey; + fn contains_slot(&self, slot_hash_key: &SlotHashKey) -> bool { + self.fork_infos.contains_key(slot_hash_key) } - fn children(&self, slot: Slot) -> Option<&[Slot]> { + fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> { self.fork_infos - .get(&slot) + .get(&slot_hash_key) .map(|fork_info| &fork_info.children[..]) } } impl ForkChoice for HeaviestSubtreeForkChoice { + 
type ForkChoiceKey = SlotHashKey; fn compute_bank_stats( &mut self, bank: &Bank, _tower: &Tower, - _progress: &mut ProgressMap, - computed_bank_state: &ComputedBankState, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, ) { - let ComputedBankState { pubkey_votes, .. } = computed_bank_state; - + let mut start = Measure::start("compute_bank_stats_time"); // Update `heaviest_subtree_fork_choice` to find the best fork to build on - let best_overall_slot = self.add_votes( - &pubkey_votes, + let root = self.root.0; + let new_votes = latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root); + let (best_overall_slot, best_overall_hash) = self.add_votes( + new_votes.into_iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); + start.stop(); datapoint_info!( - "best_slot", - ("slot", bank.slot(), i64), - ("best_slot", best_overall_slot, i64), + "compute_bank_stats-best_slot", + ("computed_slot", bank.slot(), i64), + ("overall_best_slot", best_overall_slot, i64), + ("overall_best_hash", best_overall_hash.to_string(), String), + ("elapsed", start.as_us(), i64), ); } @@ -590,47 +791,88 @@ impl ForkChoice for HeaviestSubtreeForkChoice { bank_forks: &RwLock, ) -> (Arc, Option>) { let r_bank_forks = bank_forks.read().unwrap(); - ( - r_bank_forks.get(self.best_overall_slot()).unwrap().clone(), + // BankForks should only contain one valid version of this slot + r_bank_forks + .get_with_checked_hash(self.best_overall_slot()) + .unwrap() + .clone(), self.heaviest_slot_on_same_voted_fork(tower) - .map(|heaviest_slot_on_same_voted_fork| { + .map(|slot_hash| { + // BankForks should only contain one valid version of this slot r_bank_forks - .get(heaviest_slot_on_same_voted_fork) + .get_with_checked_hash(slot_hash) .unwrap() .clone() }), ) } + + fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) { + info!( + "marking fork starting at slot: {:?} invalid candidate", + invalid_slot_hash_key + ); + let fork_info = 
self.fork_infos.get_mut(invalid_slot_hash_key); + if let Some(fork_info) = fork_info { + if fork_info.is_candidate { + fork_info.is_candidate = false; + // Aggregate to find the new best slots excluding this fork + let mut update_operations = UpdateOperations::default(); + self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key); + self.process_update_operations(update_operations); + } + } + } + + fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) { + let mut update_operations = UpdateOperations::default(); + let fork_info = self.fork_infos.get_mut(valid_slot_hash_key); + if let Some(fork_info) = fork_info { + // If a bunch of slots on the same fork are confirmed at once, then only the latest + // slot will incur this aggregation operation + fork_info.is_candidate = true; + self.insert_mark_valid_aggregate_operations( + &mut update_operations, + *valid_slot_hash_key, + ); + } + + // Aggregate to find the new best slots including this fork + self.process_update_operations(update_operations); + } } struct AncestorIterator<'a> { - current_slot: Slot, - fork_infos: &'a HashMap, + current_slot_hash_key: SlotHashKey, + fork_infos: &'a HashMap, } impl<'a> AncestorIterator<'a> { - fn new(start_slot: Slot, fork_infos: &'a HashMap) -> Self { + fn new( + start_slot_hash_key: SlotHashKey, + fork_infos: &'a HashMap, + ) -> Self { Self { - current_slot: start_slot, + current_slot_hash_key: start_slot_hash_key, fork_infos, } } } impl<'a> Iterator for AncestorIterator<'a> { - type Item = Slot; + type Item = SlotHashKey; fn next(&mut self) -> Option { - let parent = self + let parent_slot_hash_key = self .fork_infos - .get(&self.current_slot) + .get(&self.current_slot_hash_key) .map(|fork_info| fork_info.parent) .unwrap_or(None); - parent - .map(|parent| { - self.current_slot = parent; - Some(self.current_slot) + parent_slot_hash_key + .map(|parent_slot_hash_key| { + self.current_slot_hash_key = parent_slot_hash_key; + 
Some(self.current_slot_hash_key) }) .unwrap_or(None) } @@ -661,17 +903,17 @@ mod test { let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake); heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 4)], + [(vote_pubkeys[0], (4, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); assert_eq!( - heaviest_subtree_fork_choice.max_by_weight(4, 5), + heaviest_subtree_fork_choice.max_by_weight((4, Hash::default()), (5, Hash::default())), std::cmp::Ordering::Greater ); assert_eq!( - heaviest_subtree_fork_choice.max_by_weight(4, 0), + heaviest_subtree_fork_choice.max_by_weight((4, Hash::default()), (0, Hash::default())), std::cmp::Ordering::Less ); } @@ -691,41 +933,98 @@ mod test { let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake); let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 5)], + [(vote_pubkeys[0], (5, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - heaviest_subtree_fork_choice.add_root_parent(2); - assert_eq!(heaviest_subtree_fork_choice.parent(3).unwrap(), 2); + heaviest_subtree_fork_choice.add_root_parent((2, Hash::default())); + assert_eq!( + heaviest_subtree_fork_choice + .parent(&(3, Hash::default())) + .unwrap(), + (2, Hash::default()) + ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(3).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_subtree(&(3, Hash::default())) + .unwrap(), stake ); - assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(2).unwrap(), 0); assert_eq!( - heaviest_subtree_fork_choice.children(2).unwrap().to_vec(), - vec![3] + heaviest_subtree_fork_choice + .stake_voted_at(&(2, Hash::default())) + .unwrap(), + 0 + ); + assert_eq!( + heaviest_subtree_fork_choice + .children(&(2, Hash::default())) + .unwrap() + .to_vec(), + vec![(3, Hash::default())] ); - 
assert_eq!(heaviest_subtree_fork_choice.best_slot(2).unwrap(), 5); - assert!(heaviest_subtree_fork_choice.parent(2).is_none()); + assert_eq!( + heaviest_subtree_fork_choice + .best_slot(&(2, Hash::default())) + .unwrap() + .0, + 5 + ); + assert!(heaviest_subtree_fork_choice + .parent(&(2, Hash::default())) + .is_none()); } #[test] fn test_ancestor_iterator() { let mut heaviest_subtree_fork_choice = setup_forks(); - let parents: Vec<_> = heaviest_subtree_fork_choice.ancestor_iterator(6).collect(); - assert_eq!(parents, vec![5, 3, 1, 0]); - let parents: Vec<_> = heaviest_subtree_fork_choice.ancestor_iterator(4).collect(); - assert_eq!(parents, vec![2, 1, 0]); - let parents: Vec<_> = heaviest_subtree_fork_choice.ancestor_iterator(1).collect(); - assert_eq!(parents, vec![0]); - let parents: Vec<_> = heaviest_subtree_fork_choice.ancestor_iterator(0).collect(); + let parents: Vec<_> = heaviest_subtree_fork_choice + .ancestor_iterator((6, Hash::default())) + .collect(); + assert_eq!( + parents, + vec![5, 3, 1, 0] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); + let parents: Vec<_> = heaviest_subtree_fork_choice + .ancestor_iterator((4, Hash::default())) + .collect(); + assert_eq!( + parents, + vec![2, 1, 0] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); + let parents: Vec<_> = heaviest_subtree_fork_choice + .ancestor_iterator((1, Hash::default())) + .collect(); + assert_eq!( + parents, + vec![0] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); + let parents: Vec<_> = heaviest_subtree_fork_choice + .ancestor_iterator((0, Hash::default())) + .collect(); assert!(parents.is_empty()); // Set a root, everything but slots 2, 4 should be removed - heaviest_subtree_fork_choice.set_root(2); - let parents: Vec<_> = heaviest_subtree_fork_choice.ancestor_iterator(4).collect(); - assert_eq!(parents, vec![2]); + heaviest_subtree_fork_choice.set_root((2, Hash::default())); + let parents: Vec<_> = heaviest_subtree_fork_choice 
+ .ancestor_iterator((4, Hash::default())) + .collect(); + assert_eq!( + parents, + vec![2] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); } #[test] @@ -753,19 +1052,73 @@ mod test { .collect(); frozen_banks.sort_by_key(|bank| bank.slot()); - let root = bank_forks.read().unwrap().root(); + let root_bank = bank_forks.read().unwrap().root_bank(); + let root = root_bank.slot(); + let root_hash = root_bank.hash(); let heaviest_subtree_fork_choice = - HeaviestSubtreeForkChoice::new_from_frozen_banks(root, &frozen_banks); - assert!(heaviest_subtree_fork_choice.parent(0).is_none()); - assert_eq!(heaviest_subtree_fork_choice.children(0).unwrap(), &[1]); - assert_eq!(heaviest_subtree_fork_choice.parent(1), Some(0)); - assert_eq!(heaviest_subtree_fork_choice.children(1).unwrap(), &[2, 3]); - assert_eq!(heaviest_subtree_fork_choice.parent(2), Some(1)); - assert_eq!(heaviest_subtree_fork_choice.children(2).unwrap(), &[4]); - assert_eq!(heaviest_subtree_fork_choice.parent(3), Some(1)); - assert!(heaviest_subtree_fork_choice.children(3).unwrap().is_empty()); - assert_eq!(heaviest_subtree_fork_choice.parent(4), Some(2)); - assert!(heaviest_subtree_fork_choice.children(4).unwrap().is_empty()); + HeaviestSubtreeForkChoice::new_from_frozen_banks((root, root_hash), &frozen_banks); + + let bank0_hash = bank_forks.read().unwrap().get(0).unwrap().hash(); + assert!(heaviest_subtree_fork_choice + .parent(&(0, bank0_hash)) + .is_none()); + + let bank1_hash = bank_forks.read().unwrap().get(1).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice + .children(&(0, bank0_hash)) + .unwrap(), + &[(1, bank1_hash)] + ); + + assert_eq!( + heaviest_subtree_fork_choice.parent(&(1, bank1_hash)), + Some((0, bank0_hash)) + ); + let bank2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); + let bank3_hash = bank_forks.read().unwrap().get(3).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice + .children(&(1, bank1_hash)) + .unwrap(), + &[(2, bank2_hash), (3, 
bank3_hash)] + ); + assert_eq!( + heaviest_subtree_fork_choice.parent(&(2, bank2_hash)), + Some((1, bank1_hash)) + ); + let bank4_hash = bank_forks.read().unwrap().get(4).unwrap().hash(); + assert_eq!( + heaviest_subtree_fork_choice + .children(&(2, bank2_hash)) + .unwrap(), + &[(4, bank4_hash)] + ); + // Check parent and children of invalid hash don't exist + let invalid_hash = Hash::new_unique(); + assert!(heaviest_subtree_fork_choice + .children(&(2, invalid_hash)) + .is_none()); + assert!(heaviest_subtree_fork_choice + .parent(&(2, invalid_hash)) + .is_none()); + + assert_eq!( + heaviest_subtree_fork_choice.parent(&(3, bank3_hash)), + Some((1, bank1_hash)) + ); + assert!(heaviest_subtree_fork_choice + .children(&(3, bank3_hash)) + .unwrap() + .is_empty()); + assert_eq!( + heaviest_subtree_fork_choice.parent(&(4, bank4_hash)), + Some((2, bank2_hash)) + ); + assert!(heaviest_subtree_fork_choice + .children(&(4, bank4_hash)) + .unwrap() + .is_empty()); } #[test] @@ -773,21 +1126,25 @@ mod test { let mut heaviest_subtree_fork_choice = setup_forks(); // Set root to 1, should only purge 0 - heaviest_subtree_fork_choice.set_root(1); + heaviest_subtree_fork_choice.set_root((1, Hash::default())); for i in 0..=6 { let exists = i != 0; assert_eq!( - heaviest_subtree_fork_choice.fork_infos.contains_key(&i), + heaviest_subtree_fork_choice + .fork_infos + .contains_key(&(i, Hash::default())), exists ); } // Set root to 5, should purge everything except 5, 6 - heaviest_subtree_fork_choice.set_root(5); + heaviest_subtree_fork_choice.set_root((5, Hash::default())); for i in 0..=6 { let exists = i == 5 || i == 6; assert_eq!( - heaviest_subtree_fork_choice.fork_infos.contains_key(&i), + heaviest_subtree_fork_choice + .fork_infos + .contains_key(&(i, Hash::default())), exists ); } @@ -801,43 +1158,51 @@ mod test { // Vote for slot 2 heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 1)], + [(vote_pubkeys[0], (1, Hash::default()))].iter(), bank.epoch_stakes_map(), 
bank.epoch_schedule(), ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); // Set a root - heaviest_subtree_fork_choice.set_root(1); + heaviest_subtree_fork_choice.set_root((1, Hash::default())); // Vote again for slot 3 on a different fork than the last vote, // verify this fork is now the best fork heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 3)], + [(vote_pubkeys[0], (3, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); - assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(1).unwrap(), 0); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&(1, Hash::default())) + .unwrap(), + 0 + ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_at(3).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_at(&(3, Hash::default())) + .unwrap(), stake ); for slot in &[1, 3] { assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(), stake ); } // Set a root at last vote - heaviest_subtree_fork_choice.set_root(3); + heaviest_subtree_fork_choice.set_root((3, Hash::default())); // Check new leaf 7 is still propagated properly - heaviest_subtree_fork_choice.add_new_leaf_slot(7, Some(6)); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 7); + heaviest_subtree_fork_choice + .add_new_leaf_slot((7, Hash::default()), Some((6, Hash::default()))); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 7); } #[test] @@ -848,70 +1213,90 @@ mod test { // Vote for slot 0 heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 0)], + [(vote_pubkeys[0], (0, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); // Set root to 1, should purge 0 from the tree, but // there's still an 
outstanding vote for slot 0 in `pubkey_votes`. - heaviest_subtree_fork_choice.set_root(1); + heaviest_subtree_fork_choice.set_root((1, Hash::default())); // Vote again for slot 3, verify everything is ok heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 3)], + [(vote_pubkeys[0], (3, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_at(3).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_at(&(3, Hash::default())) + .unwrap(), stake ); for slot in &[1, 3] { assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(), stake ); } - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); // Set root again on different fork than the last vote - heaviest_subtree_fork_choice.set_root(2); + heaviest_subtree_fork_choice.set_root((2, Hash::default())); // Smaller vote than last vote 3 should be ignored heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 2)], + [(vote_pubkeys[0], (2, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(2).unwrap(), 0); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(2).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_at(&(2, Hash::default())) + .unwrap(), + 0 + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&(2, Hash::default())) + .unwrap(), 0 ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); // New larger vote than last vote 3 should be processed heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 4)], + [(vote_pubkeys[0], (4, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - 
assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(2).unwrap(), 0); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_at(4).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_at(&(2, Hash::default())) + .unwrap(), + 0 + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&(4, Hash::default())) + .unwrap(), stake ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(2).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_subtree(&(2, Hash::default())) + .unwrap(), stake ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(4).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_subtree(&(4, Hash::default())) + .unwrap(), stake ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); } #[test] @@ -919,7 +1304,50 @@ mod test { let heaviest_subtree_fork_choice = setup_forks(); // Best overall path is 0 -> 1 -> 2 -> 4, so best leaf // should be 4 - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); + } + + #[test] + fn test_add_new_leaf_duplicate() { + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + duplicate_leaves_descended_from_5, + ) = setup_duplicate_forks(); + + // Add a child to one of the duplicates + let duplicate_parent = duplicate_leaves_descended_from_4[0]; + let child = (11, Hash::new_unique()); + heaviest_subtree_fork_choice.add_new_leaf_slot(child, Some(duplicate_parent)); + assert_eq!( + heaviest_subtree_fork_choice + .children(&duplicate_parent) + .unwrap(), + &[child] + ); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), child); + + // All the other duplicates should have no children + for duplicate_leaf in duplicate_leaves_descended_from_5 + .iter() + .chain(std::iter::once(&duplicate_leaves_descended_from_4[1])) + { + assert!(heaviest_subtree_fork_choice + .children(&duplicate_leaf) + 
.unwrap() + .is_empty(),); + } + + // Re-adding same duplicate slot should not overwrite existing one + heaviest_subtree_fork_choice + .add_new_leaf_slot(duplicate_parent, Some((4, Hash::default()))); + assert_eq!( + heaviest_subtree_fork_choice + .children(&duplicate_parent) + .unwrap(), + &[child] + ); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), child); } #[test] @@ -927,30 +1355,42 @@ mod test { let mut heaviest_subtree_fork_choice = setup_forks(); // Add a leaf 10, it should be the best choice - heaviest_subtree_fork_choice.add_new_leaf_slot(10, Some(4)); + heaviest_subtree_fork_choice + .add_new_leaf_slot((10, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice - .ancestor_iterator(10) + .ancestor_iterator((10, Hash::default())) .collect::>(); - for a in ancestors.into_iter().chain(std::iter::once(10)) { - assert_eq!(heaviest_subtree_fork_choice.best_slot(a).unwrap(), 10); + for a in ancestors + .into_iter() + .chain(std::iter::once((10, Hash::default()))) + { + assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 10); } // Add a smaller leaf 9, it should be the best choice - heaviest_subtree_fork_choice.add_new_leaf_slot(9, Some(4)); + heaviest_subtree_fork_choice + .add_new_leaf_slot((9, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice - .ancestor_iterator(9) + .ancestor_iterator((9, Hash::default())) .collect::>(); - for a in ancestors.into_iter().chain(std::iter::once(9)) { - assert_eq!(heaviest_subtree_fork_choice.best_slot(a).unwrap(), 9); + for a in ancestors + .into_iter() + .chain(std::iter::once((9, Hash::default()))) + { + assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); } // Add a higher leaf 11, should not change the best choice - heaviest_subtree_fork_choice.add_new_leaf_slot(11, Some(4)); + heaviest_subtree_fork_choice + .add_new_leaf_slot((11, Hash::default()), Some((4, Hash::default()))); let ancestors = 
heaviest_subtree_fork_choice - .ancestor_iterator(11) + .ancestor_iterator((11, Hash::default())) .collect::>(); - for a in ancestors.into_iter().chain(std::iter::once(9)) { - assert_eq!(heaviest_subtree_fork_choice.best_slot(a).unwrap(), 9); + for a in ancestors + .into_iter() + .chain(std::iter::once((9, Hash::default()))) + { + assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 9); } // Add a vote for the other branch at slot 3. @@ -960,7 +1400,7 @@ mod test { // Leaf slot 9 stops being the `best_slot` at slot 1 because there // are now votes for the branch at slot 3 heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], leaf6)], + [(vote_pubkeys[0], (leaf6, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -968,14 +1408,18 @@ mod test { // Because slot 1 now sees the child branch at slot 3 has non-zero // weight, adding smaller leaf slot 8 in the other child branch at slot 2 // should not propagate past slot 1 - heaviest_subtree_fork_choice.add_new_leaf_slot(8, Some(4)); + heaviest_subtree_fork_choice + .add_new_leaf_slot((8, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice - .ancestor_iterator(8) + .ancestor_iterator((8, Hash::default())) .collect::>(); - for a in ancestors.into_iter().chain(std::iter::once(8)) { - let best_slot = if a > 1 { 8 } else { leaf6 }; + for a in ancestors + .into_iter() + .chain(std::iter::once((8, Hash::default()))) + { + let best_slot = if a.0 > 1 { 8 } else { leaf6 }; assert_eq!( - heaviest_subtree_fork_choice.best_slot(a).unwrap(), + heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, best_slot ); } @@ -983,26 +1427,33 @@ mod test { // Add vote for slot 8, should now be the best slot (has same weight // as fork containing slot 6, but slot 2 is smaller than slot 3). 
heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[1], 8)], + [(vote_pubkeys[1], (8, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 8); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 8); // Because slot 4 now sees the child leaf 8 has non-zero // weight, adding smaller leaf slots should not propagate past slot 4 - heaviest_subtree_fork_choice.add_new_leaf_slot(7, Some(4)); + heaviest_subtree_fork_choice + .add_new_leaf_slot((7, Hash::default()), Some((4, Hash::default()))); let ancestors = heaviest_subtree_fork_choice - .ancestor_iterator(7) + .ancestor_iterator((7, Hash::default())) .collect::>(); - for a in ancestors.into_iter().chain(std::iter::once(8)) { - assert_eq!(heaviest_subtree_fork_choice.best_slot(a).unwrap(), 8); + for a in ancestors + .into_iter() + .chain(std::iter::once((8, Hash::default()))) + { + assert_eq!(heaviest_subtree_fork_choice.best_slot(&a).unwrap().0, 8); } // All the leaves should think they are their own best choice for leaf in [8, 9, 10, 11].iter() { assert_eq!( - heaviest_subtree_fork_choice.best_slot(*leaf).unwrap(), + heaviest_subtree_fork_choice + .best_slot(&(*leaf, Hash::default())) + .unwrap() + .0, *leaf ); } @@ -1024,31 +1475,34 @@ mod test { let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(1, stake); // slot 6 should be the best because it's the only leaf - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); // Add a leaf slot 5. 
Even though 5 is less than the best leaf 6, // it's not less than it's sibling slot 4, so the best overall // leaf should remain unchanged - heaviest_subtree_fork_choice.add_new_leaf_slot(5, Some(0)); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + heaviest_subtree_fork_choice + .add_new_leaf_slot((5, Hash::default()), Some((0, Hash::default()))); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); // Add a leaf slot 2 on a different fork than leaf 6. Slot 2 should // be the new best because it's for a lesser slot - heaviest_subtree_fork_choice.add_new_leaf_slot(2, Some(0)); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 2); + heaviest_subtree_fork_choice + .add_new_leaf_slot((2, Hash::default()), Some((0, Hash::default()))); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 2); // Add a vote for slot 4, so leaf 6 should be the best again heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 4)], + [(vote_pubkeys[0], (4, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); // Adding a slot 1 that is less than the current best leaf 6 should not change the best // slot because the fork slot 5 is on has a higher weight - heaviest_subtree_fork_choice.add_new_leaf_slot(1, Some(0)); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + heaviest_subtree_fork_choice + .add_new_leaf_slot((1, Hash::default()), Some((0, Hash::default()))); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); } #[test] @@ -1056,16 +1510,41 @@ mod test { let mut heaviest_subtree_fork_choice = setup_forks(); // No weights are present, weights should be zero - heaviest_subtree_fork_choice.aggregate_slot(1); - assert_eq!(heaviest_subtree_fork_choice.stake_voted_at(1).unwrap(), 0); + heaviest_subtree_fork_choice.aggregate_slot((1, 
Hash::default())); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&(1, Hash::default())) + .unwrap(), + 0 + ); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(1).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_subtree(&(1, Hash::default())) + .unwrap(), 0 ); // The best leaf when weights are equal should prioritize the lower leaf - assert_eq!(heaviest_subtree_fork_choice.best_slot(1).unwrap(), 4); - assert_eq!(heaviest_subtree_fork_choice.best_slot(2).unwrap(), 4); - assert_eq!(heaviest_subtree_fork_choice.best_slot(3).unwrap(), 6); + assert_eq!( + heaviest_subtree_fork_choice + .best_slot(&(1, Hash::default())) + .unwrap() + .0, + 4 + ); + assert_eq!( + heaviest_subtree_fork_choice + .best_slot(&(2, Hash::default())) + .unwrap() + .0, + 4 + ); + assert_eq!( + heaviest_subtree_fork_choice + .best_slot(&(3, Hash::default())) + .unwrap() + .0, + 6 + ); // Update the weights that have voted *exactly* at each slot, the // branch containing slots {5, 6} has weight 11, so should be heavier @@ -1073,26 +1552,26 @@ mod test { let mut total_stake = 0; let staked_voted_slots: HashSet<_> = vec![2, 4, 5, 6].into_iter().collect(); for slot in &staked_voted_slots { - heaviest_subtree_fork_choice.set_stake_voted_at(*slot, *slot); + heaviest_subtree_fork_choice.set_stake_voted_at((*slot, Hash::default()), *slot); total_stake += *slot; } // Aggregate up each of the two forks (order matters, has to be // reverse order for each fork, and aggregating a slot multiple times // is fine) - let slots_to_aggregate: Vec<_> = std::iter::once(6) - .chain(heaviest_subtree_fork_choice.ancestor_iterator(6)) - .chain(std::iter::once(4)) - .chain(heaviest_subtree_fork_choice.ancestor_iterator(4)) + let slots_to_aggregate: Vec<_> = std::iter::once((6, Hash::default())) + .chain(heaviest_subtree_fork_choice.ancestor_iterator((6, Hash::default()))) + .chain(std::iter::once((4, Hash::default()))) + .chain(heaviest_subtree_fork_choice.ancestor_iterator((4, 
Hash::default()))) .collect(); - for slot in slots_to_aggregate { - heaviest_subtree_fork_choice.aggregate_slot(slot); + for slot_hash in slots_to_aggregate { + heaviest_subtree_fork_choice.aggregate_slot(slot_hash); } // The best path is now 0 -> 1 -> 3 -> 5 -> 6, so leaf 6 // should be the best choice - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 6); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 6); // Verify `stake_voted_at` for slot in 0..=6 { @@ -1103,7 +1582,9 @@ mod test { }; assert_eq!( - heaviest_subtree_fork_choice.stake_voted_at(slot).unwrap(), + heaviest_subtree_fork_choice + .stake_voted_at(&(slot, Hash::default())) + .unwrap(), expected_stake ); } @@ -1114,7 +1595,7 @@ mod test { // all slots in the subtree assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(), total_stake ); @@ -1122,10 +1603,12 @@ mod test { // Verify `stake_voted_subtree` for fork 1 let mut total_expected_stake = 0; for slot in &[4, 2] { - total_expected_stake += heaviest_subtree_fork_choice.stake_voted_at(*slot).unwrap(); + total_expected_stake += heaviest_subtree_fork_choice + .stake_voted_at(&(*slot, Hash::default())) + .unwrap(); assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(), total_expected_stake ); @@ -1133,10 +1616,12 @@ mod test { // Verify `stake_voted_subtree` for fork 2 total_expected_stake = 0; for slot in &[6, 5, 3] { - total_expected_stake += heaviest_subtree_fork_choice.stake_voted_at(*slot).unwrap(); + total_expected_stake += heaviest_subtree_fork_choice + .stake_voted_at(&(*slot, Hash::default())) + .unwrap(); assert_eq!( heaviest_subtree_fork_choice - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(), total_expected_stake ); @@ -1149,19 +1634,19 @@ mod test { let stake = 100; let (bank, vote_pubkeys) = 
bank_utils::setup_bank_and_vote_pubkeys(3, stake); - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 3), - (vote_pubkeys[1], 2), - (vote_pubkeys[2], 1), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (3, Hash::default())), + (vote_pubkeys[1], (2, Hash::default())), + (vote_pubkeys[2], (1, Hash::default())), ]; let expected_best_slot = |slot, heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| -> Slot { - if !heaviest_subtree_fork_choice.is_leaf(slot) { + if !heaviest_subtree_fork_choice.is_leaf((slot, Hash::default())) { // Both branches have equal weight, so should pick the lesser leaf if heaviest_subtree_fork_choice - .ancestor_iterator(4) - .collect::>() - .contains(&slot) + .ancestor_iterator((4, Hash::default())) + .collect::>() + .contains(&(slot, Hash::default())) { 4 } else { @@ -1182,20 +1667,20 @@ mod test { ); // Everyone makes newer votes - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 4), - (vote_pubkeys[1], 3), - (vote_pubkeys[2], 3), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (4, Hash::default())), + (vote_pubkeys[1], (3, Hash::default())), + (vote_pubkeys[2], (3, Hash::default())), ]; let expected_best_slot = |slot, heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| -> Slot { - if !heaviest_subtree_fork_choice.is_leaf(slot) { + if !heaviest_subtree_fork_choice.is_leaf((slot, Hash::default())) { // The branch with leaf 6 now has two votes, so should pick that one if heaviest_subtree_fork_choice - .ancestor_iterator(6) - .collect::>() - .contains(&slot) + .ancestor_iterator((6, Hash::default())) + .collect::>() + .contains(&(slot, Hash::default())) { 6 } else { @@ -1221,108 +1706,176 @@ mod test { let mut heaviest_subtree_fork_choice = setup_forks(); let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake); - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 3), - (vote_pubkeys[1], 4), - 
(vote_pubkeys[2], 1), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (3, Hash::default())), + (vote_pubkeys[1], (4, Hash::default())), + (vote_pubkeys[2], (1, Hash::default())), ]; - let expected_update_operations: BTreeMap<(Slot, UpdateLabel), UpdateOperation> = vec![ + let expected_update_operations: UpdateOperations = vec![ // Add/remove from new/old forks - ((1, UpdateLabel::Add), UpdateOperation::Add(stake)), - ((3, UpdateLabel::Add), UpdateOperation::Add(stake)), - ((4, UpdateLabel::Add), UpdateOperation::Add(stake)), + ( + ((1, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), + ( + ((3, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), + ( + ((4, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), // Aggregate all ancestors of changed slots - ((0, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((1, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((2, UpdateLabel::Aggregate), UpdateOperation::Aggregate), + ( + ((0, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((1, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((2, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), ] .into_iter() .collect(); let generated_update_operations = heaviest_subtree_fork_choice.generate_update_operations( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); assert_eq!(expected_update_operations, generated_update_operations); // Everyone makes older/same votes, should be ignored - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 3), - (vote_pubkeys[1], 2), - (vote_pubkeys[2], 1), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (3, Hash::default())), + (vote_pubkeys[1], (2, Hash::default())), + (vote_pubkeys[2], (1, Hash::default())), ]; let generated_update_operations = 
heaviest_subtree_fork_choice.generate_update_operations( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); assert!(generated_update_operations.is_empty()); // Some people make newer votes - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ // old, ignored - (vote_pubkeys[0], 3), + (vote_pubkeys[0], (3, Hash::default())), // new, switched forks - (vote_pubkeys[1], 5), + (vote_pubkeys[1], (5, Hash::default())), // new, same fork - (vote_pubkeys[2], 3), + (vote_pubkeys[2], (3, Hash::default())), ]; - let expected_update_operations: BTreeMap<(Slot, UpdateLabel), UpdateOperation> = vec![ - // Add/remove to/from new/old forks - ((3, UpdateLabel::Add), UpdateOperation::Add(stake)), - ((5, UpdateLabel::Add), UpdateOperation::Add(stake)), - ((1, UpdateLabel::Subtract), UpdateOperation::Subtract(stake)), - ((4, UpdateLabel::Subtract), UpdateOperation::Subtract(stake)), - // Aggregate all ancestors of changed slots - ((0, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((1, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((2, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((3, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ] - .into_iter() - .collect(); + let expected_update_operations: BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation> = + vec![ + // Add/remove to/from new/old forks + ( + ((3, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), + ( + ((5, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), + ( + ((1, Hash::default()), UpdateLabel::Subtract), + UpdateOperation::Subtract(stake), + ), + ( + ((4, Hash::default()), UpdateLabel::Subtract), + UpdateOperation::Subtract(stake), + ), + // Aggregate all ancestors of changed slots + ( + ((0, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((1, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + 
), + ( + ((2, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((3, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ] + .into_iter() + .collect(); let generated_update_operations = heaviest_subtree_fork_choice.generate_update_operations( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); assert_eq!(expected_update_operations, generated_update_operations); // People make new votes - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ // new, switch forks - (vote_pubkeys[0], 4), + (vote_pubkeys[0], (4, Hash::default())), // new, same fork - (vote_pubkeys[1], 6), + (vote_pubkeys[1], (6, Hash::default())), // new, same fork - (vote_pubkeys[2], 6), + (vote_pubkeys[2], (6, Hash::default())), ]; - let expected_update_operations: BTreeMap<(Slot, UpdateLabel), UpdateOperation> = vec![ - // Add/remove from new/old forks - ((4, UpdateLabel::Add), UpdateOperation::Add(stake)), - ((6, UpdateLabel::Add), UpdateOperation::Add(2 * stake)), - ( - (3, UpdateLabel::Subtract), - UpdateOperation::Subtract(2 * stake), - ), - ((5, UpdateLabel::Subtract), UpdateOperation::Subtract(stake)), - // Aggregate all ancestors of changed slots - ((0, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((1, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((2, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((3, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ((5, UpdateLabel::Aggregate), UpdateOperation::Aggregate), - ] - .into_iter() - .collect(); + let expected_update_operations: BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation> = + vec![ + // Add/remove from new/old forks + ( + ((4, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(stake), + ), + ( + ((6, Hash::default()), UpdateLabel::Add), + UpdateOperation::Add(2 * stake), + ), + ( + ((3, Hash::default()), UpdateLabel::Subtract), + 
UpdateOperation::Subtract(2 * stake), + ), + ( + ((5, Hash::default()), UpdateLabel::Subtract), + UpdateOperation::Subtract(stake), + ), + // Aggregate all ancestors of changed slots + ( + ((0, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((1, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((2, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((3, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ( + ((5, Hash::default()), UpdateLabel::Aggregate), + UpdateOperation::Aggregate, + ), + ] + .into_iter() + .collect(); let generated_update_operations = heaviest_subtree_fork_choice.generate_update_operations( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -1335,64 +1888,515 @@ mod test { let stake = 100; let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake); - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 3), - (vote_pubkeys[1], 2), - (vote_pubkeys[2], 1), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (3, Hash::default())), + (vote_pubkeys[1], (2, Hash::default())), + (vote_pubkeys[2], (1, Hash::default())), ]; assert_eq!( - heaviest_subtree_fork_choice.add_votes( - &pubkey_votes, - bank.epoch_stakes_map(), - bank.epoch_schedule() - ), + heaviest_subtree_fork_choice + .add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ) + .0, 4 ); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4) + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4) } #[test] - fn test_is_best_child() { - /* - Build fork structure: - slot 0 - | - slot 4 - / \ - slot 10 slot 9 - */ - let forks = tr(0) / (tr(4) / (tr(9)) / (tr(10))); - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); - assert!(heaviest_subtree_fork_choice.is_best_child(0)); - 
assert!(heaviest_subtree_fork_choice.is_best_child(4)); - - // 9 is better than 10 - assert!(heaviest_subtree_fork_choice.is_best_child(9)); - assert!(!heaviest_subtree_fork_choice.is_best_child(10)); - - // Add new leaf 8, which is better than 9, as both have weight 0 - heaviest_subtree_fork_choice.add_new_leaf_slot(8, Some(4)); - assert!(heaviest_subtree_fork_choice.is_best_child(8)); - assert!(!heaviest_subtree_fork_choice.is_best_child(9)); - assert!(!heaviest_subtree_fork_choice.is_best_child(10)); + fn test_add_votes_duplicate_tie() { + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + setup_duplicate_forks(); + let stake = 10; + let num_validators = 2; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(num_validators, stake); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], duplicate_leaves_descended_from_4[0]), + (vote_pubkeys[1], duplicate_leaves_descended_from_4[1]), + ]; + + // duplicate_leaves_descended_from_4 are sorted, and fork choice will pick the smaller + // one in the event of a tie + let expected_best_slot_hash = duplicate_leaves_descended_from_4[0]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + expected_best_slot_hash + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake + ); + + // Adding the same vote again will not do anything + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[1], duplicate_leaves_descended_from_4[1])]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + assert_eq!( + heaviest_subtree_fork_choice + 
.stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake + ); + + // All common ancestors should have subtree voted stake == 2 * stake, but direct + // voted stake == 0 + let expected_ancestors_stake = 2 * stake; + for ancestor in + heaviest_subtree_fork_choice.ancestor_iterator(duplicate_leaves_descended_from_4[1]) + { + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&ancestor) + .unwrap(), + expected_ancestors_stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&ancestor) + .unwrap(), + 0, + ); + } + } + + #[test] + fn test_add_votes_duplicate_greater_hash_ignored() { + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + setup_duplicate_forks(); + let stake = 10; + let num_validators = 2; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(num_validators, stake); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], duplicate_leaves_descended_from_4[0]), + (vote_pubkeys[1], duplicate_leaves_descended_from_4[1]), + ]; + + // duplicate_leaves_descended_from_4 are sorted, and fork choice will pick the smaller + // one in the event of a tie + let expected_best_slot_hash = duplicate_leaves_descended_from_4[0]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + // Adding a duplicate vote for a validator, for another a greater bank hash, + // should be ignored as we prioritize the smaller bank hash. Thus nothing + // should change. 
+ let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[0], duplicate_leaves_descended_from_4[1])]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + // Still only has one validator voting on it + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake + ); + // All common ancestors should have subtree voted stake == 2 * stake, but direct + // voted stake == 0 + let expected_ancestors_stake = 2 * stake; + for ancestor in + heaviest_subtree_fork_choice.ancestor_iterator(duplicate_leaves_descended_from_4[1]) + { + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&ancestor) + .unwrap(), + expected_ancestors_stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&ancestor) + .unwrap(), + 0, + ); + } + } + + #[test] + fn test_add_votes_duplicate_smaller_hash_prioritized() { + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + setup_duplicate_forks(); + let stake = 10; + let num_validators = 2; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(num_validators, stake); + + // Both voters voted on duplicate_leaves_descended_from_4[1], so thats the heaviest + // branch + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], duplicate_leaves_descended_from_4[1]), + (vote_pubkeys[1], duplicate_leaves_descended_from_4[1]), + ]; + + let expected_best_slot_hash = duplicate_leaves_descended_from_4[1]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + // BEFORE, both validators voting on this leaf + assert_eq!( + 
heaviest_subtree_fork_choice + .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + 2 * stake, + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + 2 * stake, + ); + + // Adding a duplicate vote for a validator, for another a smaller bank hash, + // should be proritized and replace the vote for the greater bank hash. + // Now because both duplicate nodes are tied, the best leaf is the smaller one. + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[0], duplicate_leaves_descended_from_4[0])]; + let expected_best_slot_hash = duplicate_leaves_descended_from_4[0]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + // AFTER, only one of the validators is voting on this leaf + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake, + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&duplicate_leaves_descended_from_4[1]) + .unwrap(), + stake, + ); + + // The other leaf now has one of the votes + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&duplicate_leaves_descended_from_4[0]) + .unwrap(), + stake, + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&duplicate_leaves_descended_from_4[0]) + .unwrap(), + stake, + ); + + // All common ancestors should have subtree voted stake == 2 * stake, but direct + // voted stake == 0 + let expected_ancestors_stake = 2 * stake; + for ancestor in + heaviest_subtree_fork_choice.ancestor_iterator(duplicate_leaves_descended_from_4[0]) + { + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&ancestor) + .unwrap(), + expected_ancestors_stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&ancestor) + .unwrap(), + 0, + ); + } + } + + #[test] + fn 
test_add_votes_duplicate_then_outdated() { + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _) = + setup_duplicate_forks(); + let stake = 10; + let num_validators = 3; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(num_validators, stake); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], duplicate_leaves_descended_from_4[0]), + (vote_pubkeys[1], duplicate_leaves_descended_from_4[1]), + ]; + + // duplicate_leaves_descended_from_4 are sorted, and fork choice will pick the smaller + // one in the event of a tie + let expected_best_slot_hash = duplicate_leaves_descended_from_4[0]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + // Create two children for slots greater than the duplicate slot, + // 1) descended from the current best slot (which also happens to be a duplicate slot) + // 2) another descended from a non-duplicate slot. 
+ assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + duplicate_leaves_descended_from_4[0] + ); + // Create new child with heaviest duplicate parent + let duplicate_parent = duplicate_leaves_descended_from_4[0]; + let duplicate_slot = duplicate_parent.0; + + // Create new child with non-duplicate parent + let nonduplicate_parent = (2, Hash::default()); + let higher_child_with_duplicate_parent = (duplicate_slot + 1, Hash::new_unique()); + let higher_child_with_nonduplicate_parent = (duplicate_slot + 2, Hash::new_unique()); + heaviest_subtree_fork_choice + .add_new_leaf_slot(higher_child_with_duplicate_parent, Some(duplicate_parent)); + heaviest_subtree_fork_choice.add_new_leaf_slot( + higher_child_with_nonduplicate_parent, + Some(nonduplicate_parent), + ); + + // vote_pubkeys[0] and vote_pubkeys[1] should both have their latest votes + // erased after a vote for a higher parent + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], higher_child_with_duplicate_parent), + (vote_pubkeys[1], higher_child_with_nonduplicate_parent), + (vote_pubkeys[2], higher_child_with_nonduplicate_parent), + ]; + let expected_best_slot_hash = higher_child_with_nonduplicate_parent; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + + // All the stake dirctly voting on the duplicates have been outdated + for (i, duplicate_leaf) in duplicate_leaves_descended_from_4.iter().enumerate() { + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(duplicate_leaf) + .unwrap(), + 0, + ); + + if i == 0 { + // The subtree stake of the first duplicate however, has one vote still + // because it's the parent of the `higher_child_with_duplicate_parent`, + // which has one vote + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(duplicate_leaf) + .unwrap(), + stake, + ); + } else { + assert_eq!( + heaviest_subtree_fork_choice + 
.stake_voted_subtree(duplicate_leaf) + .unwrap(), + 0, + ); + } + } + + // Node 4 has subtree voted stake == stake since it only has one voter on it + let node4 = (4, Hash::default()); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&node4) + .unwrap(), + stake, + ); + assert_eq!( + heaviest_subtree_fork_choice.stake_voted_at(&node4).unwrap(), + 0, + ); + + // All ancestors of 4 should have subtree voted stake == num_validators * stake, + // but direct voted stake == 0 + let expected_ancestors_stake = num_validators as u64 * stake; + for ancestor in heaviest_subtree_fork_choice.ancestor_iterator(node4) { + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_subtree(&ancestor) + .unwrap(), + expected_ancestors_stake + ); + assert_eq!( + heaviest_subtree_fork_choice + .stake_voted_at(&ancestor) + .unwrap(), + 0, + ); + } + } + + #[test] + fn test_add_votes_duplicate_zero_stake() { + let (mut heaviest_subtree_fork_choice, duplicate_leaves_descended_from_4, _): ( + HeaviestSubtreeForkChoice, + Vec, + Vec, + ) = setup_duplicate_forks(); + + let stake = 0; + let num_validators = 2; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(num_validators, stake); + + // Make new vote with vote_pubkeys[0] for a higher slot + // Create new child with heaviest duplicate parent + let duplicate_parent = duplicate_leaves_descended_from_4[0]; + let duplicate_slot = duplicate_parent.0; + let higher_child_with_duplicate_parent = (duplicate_slot + 1, Hash::new_unique()); + heaviest_subtree_fork_choice + .add_new_leaf_slot(higher_child_with_duplicate_parent, Some(duplicate_parent)); + + // Vote for pubkey 0 on one of the duplicate slots + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[0], duplicate_leaves_descended_from_4[1])]; + + // Stake is zero, so because duplicate_leaves_descended_from_4[0] and + // duplicate_leaves_descended_from_4[1] are tied, the child of the smaller + // node duplicate_leaves_descended_from_4[0] is 
the one that is picked + let expected_best_slot_hash = higher_child_with_duplicate_parent; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + assert_eq!( + *heaviest_subtree_fork_choice + .latest_votes + .get(&vote_pubkeys[0]) + .unwrap(), + duplicate_leaves_descended_from_4[1] + ); + + // Now add a vote for a higher slot, and ensure the latest votes + // for this pubkey were updated + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[0], higher_child_with_duplicate_parent)]; + + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + expected_best_slot_hash + ); + assert_eq!( + *heaviest_subtree_fork_choice + .latest_votes + .get(&vote_pubkeys[0]) + .unwrap(), + higher_child_with_duplicate_parent + ); + } + + #[test] + fn test_is_best_child() { + /* + Build fork structure: + slot 0 + | + slot 4 + / \ + slot 10 slot 9 + */ + let forks = tr(0) / (tr(4) / (tr(9)) / (tr(10))); + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); + assert!(heaviest_subtree_fork_choice.is_best_child(&(0, Hash::default()))); + assert!(heaviest_subtree_fork_choice.is_best_child(&(4, Hash::default()))); + + // 9 is better than 10 + assert!(heaviest_subtree_fork_choice.is_best_child(&(9, Hash::default()))); + assert!(!heaviest_subtree_fork_choice.is_best_child(&(10, Hash::default()))); + + // Add new leaf 8, which is better than 9, as both have weight 0 + heaviest_subtree_fork_choice + .add_new_leaf_slot((8, Hash::default()), Some((4, Hash::default()))); + assert!(heaviest_subtree_fork_choice.is_best_child(&(8, Hash::default()))); + assert!(!heaviest_subtree_fork_choice.is_best_child(&(9, Hash::default()))); + assert!(!heaviest_subtree_fork_choice.is_best_child(&(10, Hash::default()))); // Add vote for 9, it's the best again let (bank, 
vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, 100); heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 9)], + [(vote_pubkeys[0], (9, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - assert!(heaviest_subtree_fork_choice.is_best_child(9)); - assert!(!heaviest_subtree_fork_choice.is_best_child(8)); - assert!(!heaviest_subtree_fork_choice.is_best_child(10)); + assert!(heaviest_subtree_fork_choice.is_best_child(&(9, Hash::default()))); + assert!(!heaviest_subtree_fork_choice.is_best_child(&(8, Hash::default()))); + assert!(!heaviest_subtree_fork_choice.is_best_child(&(10, Hash::default()))); } #[test] fn test_merge() { let stake = 100; - let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake); + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(4, stake); /* Build fork structure: slot 0 @@ -1408,13 +2412,13 @@ mod test { */ let forks = tr(0) / (tr(3) / (tr(5) / (tr(7))) / (tr(9) / (tr(11) / (tr(12))))); let mut tree1 = HeaviestSubtreeForkChoice::new_from_tree(forks); - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ - (vote_pubkeys[0], 5), - (vote_pubkeys[1], 3), - (vote_pubkeys[2], 12), + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (5, Hash::default())), + (vote_pubkeys[1], (3, Hash::default())), + (vote_pubkeys[2], (12, Hash::default())), ]; tree1.add_votes( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -1430,58 +2434,193 @@ mod test { slot 17 | slot 19 (vote pubkey 1) | - slot 20 - */ + slot 20 (vote pubkey 3) + */ let forks = tr(10) / (tr(15) / (tr(16) / (tr(17))) / (tr(18) / (tr(19) / (tr(20))))); let mut tree2 = HeaviestSubtreeForkChoice::new_from_tree(forks); - let pubkey_votes: Vec<(Pubkey, Slot)> = vec![ + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ // more than tree 1 - (vote_pubkeys[0], 16), + (vote_pubkeys[0], (16, Hash::default())), // more than tree1 - (vote_pubkeys[1], 19), + 
(vote_pubkeys[1], (19, Hash::default())), // less than tree1 - (vote_pubkeys[2], 10), + (vote_pubkeys[2], (10, Hash::default())), + // Add a pubkey that only voted on this tree + (vote_pubkeys[3], (20, Hash::default())), ]; tree2.add_votes( - &pubkey_votes, + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); // Merge tree2 at leaf 7 of tree1 - tree1.merge(tree2, 7, bank.epoch_stakes_map(), bank.epoch_schedule()); + tree1.merge( + tree2, + &(7, Hash::default()), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ); // Check ancestry information is correct - let ancestors: Vec<_> = tree1.ancestor_iterator(20).collect(); - assert_eq!(ancestors, vec![19, 18, 15, 10, 7, 5, 3, 0]); - let ancestors: Vec<_> = tree1.ancestor_iterator(17).collect(); - assert_eq!(ancestors, vec![16, 15, 10, 7, 5, 3, 0]); + let ancestors: Vec<_> = tree1.ancestor_iterator((20, Hash::default())).collect(); + assert_eq!( + ancestors, + vec![19, 18, 15, 10, 7, 5, 3, 0] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); + let ancestors: Vec<_> = tree1.ancestor_iterator((17, Hash::default())).collect(); + assert_eq!( + ancestors, + vec![16, 15, 10, 7, 5, 3, 0] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() + ); - // Check correctness off votes + // Check correctness of votes // Pubkey 0 - assert_eq!(tree1.stake_voted_at(16).unwrap(), stake); - assert_eq!(tree1.stake_voted_at(5).unwrap(), 0); + assert_eq!(tree1.stake_voted_at(&(16, Hash::default())).unwrap(), stake); + assert_eq!(tree1.stake_voted_at(&(5, Hash::default())).unwrap(), 0); // Pubkey 1 - assert_eq!(tree1.stake_voted_at(19).unwrap(), stake); - assert_eq!(tree1.stake_voted_at(3).unwrap(), 0); + assert_eq!(tree1.stake_voted_at(&(19, Hash::default())).unwrap(), stake); + assert_eq!(tree1.stake_voted_at(&(3, Hash::default())).unwrap(), 0); // Pubkey 2 - assert_eq!(tree1.stake_voted_at(10).unwrap(), 0); - assert_eq!(tree1.stake_voted_at(12).unwrap(), stake); + 
assert_eq!(tree1.stake_voted_at(&(10, Hash::default())).unwrap(), 0); + assert_eq!(tree1.stake_voted_at(&(12, Hash::default())).unwrap(), stake); + // Pubkey 3 + assert_eq!(tree1.stake_voted_at(&(20, Hash::default())).unwrap(), stake); for slot in &[0, 3] { - assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 3 * stake); + assert_eq!( + tree1 + .stake_voted_subtree(&(*slot, Hash::default())) + .unwrap(), + 4 * stake + ); } for slot in &[5, 7, 10, 15] { - assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 2 * stake); + assert_eq!( + tree1 + .stake_voted_subtree(&(*slot, Hash::default())) + .unwrap(), + 3 * stake + ); + } + for slot in &[18, 19] { + assert_eq!( + tree1 + .stake_voted_subtree(&(*slot, Hash::default())) + .unwrap(), + 2 * stake + ); } - for slot in &[9, 11, 12, 16, 18, 19] { - assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), stake); + for slot in &[9, 11, 12, 16, 20] { + assert_eq!( + tree1 + .stake_voted_subtree(&(*slot, Hash::default())) + .unwrap(), + stake + ); } - for slot in &[17, 20] { - assert_eq!(tree1.stake_voted_subtree(*slot).unwrap(), 0); + for slot in &[17] { + assert_eq!( + tree1 + .stake_voted_subtree(&(*slot, Hash::default())) + .unwrap(), + 0 + ); } - assert_eq!(tree1.best_overall_slot(), 17); + assert_eq!(tree1.best_overall_slot().0, 20); + } + + #[test] + fn test_merge_duplicate() { + let stake = 100; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(2, stake); + let mut slot_5_duplicate_hashes = std::iter::repeat_with(|| (5, Hash::new_unique())) + .take(2) + .collect::>(); + slot_5_duplicate_hashes.sort(); + + /* + Build fork structure: + slot 0 + / \ + slot 2 slot 5 (bigger hash) + */ + let forks = + tr((0, Hash::default())) / tr((2, Hash::default())) / tr(slot_5_duplicate_hashes[1]); + let mut tree1 = HeaviestSubtreeForkChoice::new_from_tree(forks); + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (2, Hash::default())), + (vote_pubkeys[1], slot_5_duplicate_hashes[1]), + ]; + 
tree1.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ); + + /* + Build fork structure: + slot 3 + | + slot 5 (smaller hash, prioritized over previous version) + */ + let forks = tr((3, Hash::default())) / tr(slot_5_duplicate_hashes[0]); + let mut tree2 = HeaviestSubtreeForkChoice::new_from_tree(forks); + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (3, Hash::default())), + // Pubkey 1 voted on another version of slot 5 + (vote_pubkeys[1], slot_5_duplicate_hashes[0]), + ]; + + tree2.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ); + + // Merge tree2 at leaf 2 of tree1 + tree1.merge( + tree2, + &(2, Hash::default()), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ); + + // Pubkey 1 voted on both versions of slot 5, but should prioritize the one in + // the merged branch because it's for a smaller hash + assert_eq!( + tree1.stake_voted_at(&slot_5_duplicate_hashes[1]).unwrap(), + 0 + ); + assert_eq!( + tree1.stake_voted_at(&slot_5_duplicate_hashes[0]).unwrap(), + stake + ); + assert_eq!(tree1.best_overall_slot(), slot_5_duplicate_hashes[0]); + + // Check the ancestors are correct + let ancestors: Vec<_> = tree1 + .ancestor_iterator(slot_5_duplicate_hashes[1]) + .collect(); + assert_eq!(ancestors, vec![(0, Hash::default())]); + let ancestors: Vec<_> = tree1 + .ancestor_iterator(slot_5_duplicate_hashes[0]) + .collect(); + assert_eq!( + ancestors, + vec![ + (3, Hash::default()), + (2, Hash::default()), + (0, Hash::default()) + ] + ); } #[test] @@ -1489,36 +2628,53 @@ mod test { let mut heaviest_subtree_fork_choice = setup_forks(); // Diff of same root is empty, no matter root, intermediate node, or leaf - assert!(heaviest_subtree_fork_choice.subtree_diff(0, 0).is_empty()); - assert!(heaviest_subtree_fork_choice.subtree_diff(5, 5).is_empty()); - assert!(heaviest_subtree_fork_choice.subtree_diff(6, 6).is_empty()); + assert!(heaviest_subtree_fork_choice + 
.subtree_diff((0, Hash::default()), (0, Hash::default())) + .is_empty()); + assert!(heaviest_subtree_fork_choice + .subtree_diff((5, Hash::default()), (5, Hash::default())) + .is_empty()); + assert!(heaviest_subtree_fork_choice + .subtree_diff((6, Hash::default()), (6, Hash::default())) + .is_empty()); // The set reachable from slot 3, excluding subtree 1, is just everything // in slot 3 since subtree 1 is an ancestor assert_eq!( - heaviest_subtree_fork_choice.subtree_diff(3, 1), - vec![3, 5, 6].into_iter().collect::>() + heaviest_subtree_fork_choice.subtree_diff((3, Hash::default()), (1, Hash::default())), + vec![3, 5, 6] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() ); // The set reachable from slot 1, excluding subtree 3, is just 1 and // the subtree at 2 assert_eq!( - heaviest_subtree_fork_choice.subtree_diff(1, 3), - vec![1, 2, 4].into_iter().collect::>() + heaviest_subtree_fork_choice.subtree_diff((1, Hash::default()), (3, Hash::default())), + vec![1, 2, 4] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() ); // The set reachable from slot 1, excluding leaf 6, is just everything // except leaf 6 assert_eq!( - heaviest_subtree_fork_choice.subtree_diff(0, 6), - vec![0, 1, 3, 5, 2, 4].into_iter().collect::>() + heaviest_subtree_fork_choice.subtree_diff((0, Hash::default()), (6, Hash::default())), + vec![0, 1, 3, 5, 2, 4] + .into_iter() + .map(|s| (s, Hash::default())) + .collect::>() ); // Set root at 1 - heaviest_subtree_fork_choice.set_root(1); + heaviest_subtree_fork_choice.set_root((1, Hash::default())); // Zero no longer exists, set reachable from 0 is empty - assert!(heaviest_subtree_fork_choice.subtree_diff(0, 6).is_empty()); + assert!(heaviest_subtree_fork_choice + .subtree_diff((0, Hash::default()), (6, Hash::default())) + .is_empty()); } #[test] @@ -1532,7 +2688,7 @@ mod test { assert_eq!(tower.is_stray_last_vote(), false); assert_eq!( heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower), - Some(2) + 
Some((2, Hash::default())) ); // Make slot 1 (existing in bank_forks) a restored stray slot @@ -1547,7 +2703,7 @@ mod test { assert_eq!(tower.is_stray_last_vote(), true); assert_eq!( heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower), - Some(2) + Some((2, Hash::default())) ); // Make slot 3 (NOT existing in bank_forks) a restored stray slot @@ -1563,6 +2719,168 @@ mod test { ); } + #[test] + fn test_mark_valid_invalid_forks() { + let mut heaviest_subtree_fork_choice = setup_forks(); + let stake = 100; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], (6, Hash::default())), + (vote_pubkeys[1], (6, Hash::default())), + (vote_pubkeys[2], (2, Hash::default())), + ]; + let expected_best_slot = 6; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + (expected_best_slot, Hash::default()), + ); + + // Mark slot 5 as invalid, the best fork should be its ancestor 3, + // not the other for at 4. 
+ let invalid_candidate = (5, Hash::default()); + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3); + assert!(!heaviest_subtree_fork_choice + .is_candidate_slot(&invalid_candidate) + .unwrap()); + + // The ancestor is still a candidate + assert!(heaviest_subtree_fork_choice + .is_candidate_slot(&(3, Hash::default())) + .unwrap()); + + // Adding another descendant to the invalid candidate won't + // update the best slot, even if it contains votes + let new_leaf_slot7 = 7; + heaviest_subtree_fork_choice.add_new_leaf_slot( + (new_leaf_slot7, Hash::default()), + Some((6, Hash::default())), + ); + let invalid_slot_ancestor = 3; + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot().0, + invalid_slot_ancestor + ); + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = + vec![(vote_pubkeys[0], (new_leaf_slot7, Hash::default()))]; + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule() + ), + (invalid_slot_ancestor, Hash::default()), + ); + + // Adding a descendant to the ancestor of the invalid candidate *should* update + // the best slot though, since the ancestor is on the heaviest fork + let new_leaf_slot8 = 8; + heaviest_subtree_fork_choice.add_new_leaf_slot( + (new_leaf_slot8, Hash::default()), + Some((invalid_slot_ancestor, Hash::default())), + ); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot().0, + new_leaf_slot8, + ); + + // If we mark slot a descendant of `invalid_candidate` as valid, then that + // should also mark `invalid_candidate` as valid, and the best slot should + // be the leaf of the heaviest fork, `new_leaf_slot`. 
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&invalid_candidate); + assert!(heaviest_subtree_fork_choice + .is_candidate_slot(&invalid_candidate) + .unwrap()); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot().0, + // Should pick the smaller slot of the two new equally weighted leaves + new_leaf_slot7 + ); + } + + #[test] + fn test_mark_valid_invalid_forks_duplicate() { + let ( + mut heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + duplicate_leaves_descended_from_5, + ) = setup_duplicate_forks(); + let stake = 100; + let (bank, vote_pubkeys) = bank_utils::setup_bank_and_vote_pubkeys(3, stake); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], duplicate_leaves_descended_from_4[0]), + (vote_pubkeys[1], duplicate_leaves_descended_from_5[0]), + ]; + + // The best slot should be the the smallest leaf descended from 4 + assert_eq!( + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ), + duplicate_leaves_descended_from_4[0] + ); + + // If we mark slot 4 as invalid, the ancestor 2 should be the heaviest, not + // the other branch at slot 5 + let invalid_candidate = (4, Hash::default()); + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + (2, Hash::default()) + ); + + // Marking candidate as valid again will choose the the heaviest leaf of + // the newly valid branch + let duplicate_slot = duplicate_leaves_descended_from_4[0].0; + let duplicate_descendant = (duplicate_slot + 1, Hash::new_unique()); + heaviest_subtree_fork_choice.add_new_leaf_slot( + duplicate_descendant, + Some(duplicate_leaves_descended_from_4[0]), + ); + heaviest_subtree_fork_choice.mark_fork_valid_candidate(&invalid_candidate); + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + duplicate_descendant + ); + + // Mark the current heaviest branch as 
invalid again + heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate); + + // If we add a new version of the duplicate slot that is not descended from the invalid + // candidate and votes for that duplicate slot, the new duplicate slot should be picked + // once it has more weight + let new_duplicate_hash = Hash::default(); + // The hash has to be smaller in order for the votes to be counted + assert!(new_duplicate_hash < duplicate_leaves_descended_from_4[0].1); + let new_duplicate = (duplicate_slot, new_duplicate_hash); + heaviest_subtree_fork_choice.add_new_leaf_slot(new_duplicate, Some((3, Hash::default()))); + + let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![ + (vote_pubkeys[0], new_duplicate), + (vote_pubkeys[1], new_duplicate), + ]; + + heaviest_subtree_fork_choice.add_votes( + pubkey_votes.iter(), + bank.epoch_stakes_map(), + bank.epoch_schedule(), + ); + + assert_eq!( + heaviest_subtree_fork_choice.best_overall_slot(), + new_duplicate + ); + } + fn setup_forks() -> HeaviestSubtreeForkChoice { /* Build fork structure: @@ -1581,9 +2899,75 @@ mod test { HeaviestSubtreeForkChoice::new_from_tree(forks) } + fn setup_duplicate_forks() -> ( + HeaviestSubtreeForkChoice, + Vec, + Vec, + ) { + /* + Build fork structure: + slot 0 + | + slot 1 + / \ + slot 2 | + | slot 3 + slot 4 \ + / \ slot 5 + slot 10 slot 10 / | \ + slot 6 slot 10 slot 10 + */ + + let mut heaviest_subtree_fork_choice = setup_forks(); + let duplicate_slot = 10; + let mut duplicate_leaves_descended_from_4 = + std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) + .take(2) + .collect::>(); + let mut duplicate_leaves_descended_from_5 = + std::iter::repeat_with(|| (duplicate_slot, Hash::new_unique())) + .take(2) + .collect::>(); + duplicate_leaves_descended_from_4.sort(); + duplicate_leaves_descended_from_5.sort(); + + // Add versions of leaf 10, some with different ancestors, some with the same + // ancestors + for duplicate_leaf in 
&duplicate_leaves_descended_from_4 { + heaviest_subtree_fork_choice + .add_new_leaf_slot(*duplicate_leaf, Some((4, Hash::default()))); + } + for duplicate_leaf in &duplicate_leaves_descended_from_5 { + heaviest_subtree_fork_choice + .add_new_leaf_slot(*duplicate_leaf, Some((5, Hash::default()))); + } + + let mut dup_children = heaviest_subtree_fork_choice + .children(&(4, Hash::default())) + .unwrap() + .to_vec(); + dup_children.sort(); + assert_eq!(dup_children, duplicate_leaves_descended_from_4); + let mut dup_children: Vec<_> = heaviest_subtree_fork_choice + .children(&(5, Hash::default())) + .unwrap() + .iter() + .copied() + .filter(|(slot, _)| *slot == duplicate_slot) + .collect(); + dup_children.sort(); + assert_eq!(dup_children, duplicate_leaves_descended_from_5); + + ( + heaviest_subtree_fork_choice, + duplicate_leaves_descended_from_4, + duplicate_leaves_descended_from_5, + ) + } + fn check_process_update_correctness( heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, - pubkey_votes: &[(Pubkey, Slot)], + pubkey_votes: &[(Pubkey, SlotHashKey)], slots_range: Range, bank: &Bank, stake: u64, @@ -1591,22 +2975,24 @@ mod test { ) where F: FnMut(Slot, &HeaviestSubtreeForkChoice) -> Slot, { - let unique_votes: HashSet = pubkey_votes - .iter() - .map(|(_, vote_slot)| *vote_slot) - .collect(); - let vote_ancestors: HashMap> = unique_votes + let unique_votes: HashSet = pubkey_votes.iter().map(|(_, (slot, _))| *slot).collect(); + let vote_ancestors: HashMap> = unique_votes .iter() .map(|v| { ( *v, - heaviest_subtree_fork_choice.ancestor_iterator(*v).collect(), + heaviest_subtree_fork_choice + .ancestor_iterator((*v, Hash::default())) + .collect(), ) }) .collect(); let mut vote_count: HashMap = HashMap::new(); for (_, vote) in pubkey_votes { - vote_count.entry(*vote).and_modify(|c| *c += 1).or_insert(1); + vote_count + .entry(vote.0) + .and_modify(|c| *c += 1) + .or_insert(1); } // Maps a slot to the number of descendants of that slot @@ -1617,7 +3003,8 @@ 
mod test { let num_voted_descendants = vote_ancestors .iter() .map(|(vote_slot, ancestors)| { - (ancestors.contains(&slot) || *vote_slot == slot) as usize + (ancestors.contains(&(slot, Hash::default())) || *vote_slot == slot) + as usize * vote_count.get(vote_slot).unwrap() }) .sum(); @@ -1625,13 +3012,13 @@ mod test { }) .collect(); - let update_operations = heaviest_subtree_fork_choice.generate_update_operations( - &pubkey_votes, + let update_operations_batch = heaviest_subtree_fork_choice.generate_update_operations( + pubkey_votes.iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); - heaviest_subtree_fork_choice.process_update_operations(update_operations); + heaviest_subtree_fork_choice.process_update_operations(update_operations_batch); for slot in slots_range { let expected_stake_voted_at = vote_count.get(&slot).cloned().unwrap_or(0) as u64 * stake; @@ -1639,17 +3026,22 @@ mod test { *num_voted_descendants.get(&slot).unwrap() as u64 * stake; assert_eq!( expected_stake_voted_at, - heaviest_subtree_fork_choice.stake_voted_at(slot).unwrap() + heaviest_subtree_fork_choice + .stake_voted_at(&(slot, Hash::default())) + .unwrap() ); assert_eq!( expected_stake_voted_subtree, heaviest_subtree_fork_choice - .stake_voted_subtree(slot) + .stake_voted_subtree(&(slot, Hash::default())) .unwrap() ); assert_eq!( expected_best_slot(slot, heaviest_subtree_fork_choice), - heaviest_subtree_fork_choice.best_slot(slot).unwrap() + heaviest_subtree_fork_choice + .best_slot(&(slot, Hash::default())) + .unwrap() + .0 ); } } diff --git a/core/src/latest_validator_votes_for_frozen_banks.rs b/core/src/latest_validator_votes_for_frozen_banks.rs new file mode 100644 index 0000000000..3e498f1e02 --- /dev/null +++ b/core/src/latest_validator_votes_for_frozen_banks.rs @@ -0,0 +1,517 @@ +use crate::heaviest_subtree_fork_choice::SlotHashKey; +use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}; +use std::collections::{hash_map::Entry, HashMap}; + +#[derive(Default)] +pub(crate) 
struct LatestValidatorVotesForFrozenBanks { + // TODO: Clean outdated/unstaked pubkeys from this list. + max_gossip_frozen_votes: HashMap)>, + max_replay_frozen_votes: HashMap)>, + // Pubkeys that had their `max_frozen_votes` updated since the last + // fork choice update + fork_choice_dirty_set: HashMap)>, +} + +impl LatestValidatorVotesForFrozenBanks { + // `frozen_hash.is_some()` if the bank with slot == `vote_slot` is frozen + // Returns whether the vote was actually added, and the latest voted frozen slot + pub(crate) fn check_add_vote( + &mut self, + vote_pubkey: Pubkey, + vote_slot: Slot, + frozen_hash: Option, + is_replay_vote: bool, + ) -> (bool, Option) { + let vote_map = if is_replay_vote { + &mut self.max_replay_frozen_votes + } else { + &mut self.max_gossip_frozen_votes + }; + let pubkey_max_frozen_votes = vote_map.entry(vote_pubkey); + if let Some(frozen_hash) = frozen_hash { + match pubkey_max_frozen_votes { + Entry::Occupied(mut occupied_entry) => { + let (latest_frozen_vote_slot, latest_frozen_vote_hashes) = + occupied_entry.get_mut(); + if vote_slot > *latest_frozen_vote_slot { + if is_replay_vote { + // Only record votes detected through replaying blocks, + // because votes in gossip are not consistently observable + // if the validator is replacing them. + self.fork_choice_dirty_set + .insert(vote_pubkey, (vote_slot, vec![frozen_hash])); + } + *latest_frozen_vote_slot = vote_slot; + *latest_frozen_vote_hashes = vec![frozen_hash]; + return (true, Some(vote_slot)); + } else if vote_slot == *latest_frozen_vote_slot + && !latest_frozen_vote_hashes.contains(&frozen_hash) + { + if is_replay_vote { + // Only record votes detected through replaying blocks, + // because votes in gossip are not consistently observable + // if the validator is replacing them. 
+ let (_, dirty_frozen_hashes) = + self.fork_choice_dirty_set.entry(vote_pubkey).or_default(); + assert!(!dirty_frozen_hashes.contains(&frozen_hash)); + dirty_frozen_hashes.push(frozen_hash); + } + latest_frozen_vote_hashes.push(frozen_hash); + return (true, Some(vote_slot)); + } else { + // We have newer votes for this validator, we don't care about this vote + return (false, Some(*latest_frozen_vote_slot)); + } + } + + Entry::Vacant(vacant_entry) => { + vacant_entry.insert((vote_slot, vec![frozen_hash])); + if is_replay_vote { + self.fork_choice_dirty_set + .insert(vote_pubkey, (vote_slot, vec![frozen_hash])); + } + return (true, Some(vote_slot)); + } + } + } + + // Non-frozen banks are not inserted because we only track frozen votes in this + // struct + ( + false, + match pubkey_max_frozen_votes { + Entry::Occupied(occupied_entry) => Some(occupied_entry.get().0), + Entry::Vacant(_) => None, + }, + ) + } + + pub(crate) fn take_votes_dirty_set(&mut self, root: Slot) -> Vec<(Pubkey, SlotHashKey)> { + let new_votes = std::mem::take(&mut self.fork_choice_dirty_set); + new_votes + .into_iter() + .filter(|(_, (slot, _))| *slot >= root) + .flat_map(|(pk, (slot, hashes))| { + hashes + .into_iter() + .map(|hash| (pk, (slot, hash))) + .collect::>() + }) + .collect() + } + + #[cfg(test)] + fn latest_vote(&self, pubkey: &Pubkey, is_replay_vote: bool) -> Option<&(Slot, Vec)> { + let vote_map = if is_replay_vote { + &self.max_replay_frozen_votes + } else { + &self.max_gossip_frozen_votes + }; + vote_map.get(pubkey) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn run_test_latest_validator_votes_for_frozen_banks_check_add_vote(is_replay_vote: bool) { + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); + + // Case 1: Non-frozen banks shouldn't be added + let vote_pubkey = Pubkey::new_unique(); + let mut vote_slot = 1; + let frozen_hash = None; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + 
vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + // Non-frozen bank isn't inserted, so should return None for + // the highest voted frozen slot + (false, None) + ); + assert!(latest_validator_votes_for_frozen_banks + .max_replay_frozen_votes + .is_empty()); + assert!(latest_validator_votes_for_frozen_banks + .max_gossip_frozen_votes + .is_empty()); + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .is_empty()); + + // Case 2: Frozen vote should be added, but the same vote added again + // shouldn't update state + let num_repeated_iterations = 3; + let frozen_hash = Some(Hash::new_unique()); + for i in 0..num_repeated_iterations { + let expected_result = if i == 0 { + (true, Some(vote_slot)) + } else { + (false, Some(vote_slot)) + }; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + expected_result + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + if is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + } + + // Case 3: Adding duplicate vote for same slot should update the state + let duplicate_frozen_hash = Some(Hash::new_unique()); + let all_frozen_hashes = vec![frozen_hash.unwrap(), duplicate_frozen_hash.unwrap()]; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + duplicate_frozen_hash, + is_replay_vote, + ), + (true, Some(vote_slot)) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, all_frozen_hashes.clone()) + ); + if 
is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (vote_slot, all_frozen_hashes.clone()) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + + // Case 4: Adding duplicate vote that is not frozen should not update the state + let frozen_hash = None; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + (false, Some(vote_slot)) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, all_frozen_hashes.clone()) + ); + if is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (vote_slot, all_frozen_hashes.clone()) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + + // Case 5: Adding a vote for a new higher slot that is not yet frozen + // should not update the state + let frozen_hash = None; + let old_vote_slot = vote_slot; + vote_slot += 1; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + (false, Some(old_vote_slot)) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (old_vote_slot, all_frozen_hashes.clone()) + ); + if is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (old_vote_slot, all_frozen_hashes) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + + // Case 6: Adding a vote for a new higher slot that *is* frozen + // should upate the state + let frozen_hash 
= Some(Hash::new_unique()); + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + (true, Some(vote_slot)) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + if is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + + // Case 7: Adding a vote for a new pubkey should also update the state + vote_slot += 1; + let frozen_hash = Some(Hash::new_unique()); + let vote_pubkey = Pubkey::new_unique(); + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + frozen_hash, + is_replay_vote, + ), + (true, Some(vote_slot)) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + if is_replay_vote { + assert_eq!( + *latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .unwrap(), + (vote_slot, vec![frozen_hash.unwrap()]) + ); + } else { + assert!(latest_validator_votes_for_frozen_banks + .fork_choice_dirty_set + .get(&vote_pubkey) + .is_none()); + } + } + + #[test] + fn test_latest_validator_votes_for_frozen_banks_check_add_vote_is_replay() { + run_test_latest_validator_votes_for_frozen_banks_check_add_vote(true) + } + + #[test] + fn test_latest_validator_votes_for_frozen_banks_check_add_vote_is_not_replay() { + run_test_latest_validator_votes_for_frozen_banks_check_add_vote(false) + } + + fn run_test_latest_validator_votes_for_frozen_banks_take_votes_dirty_set(is_replay: bool) { + let mut latest_validator_votes_for_frozen_banks = + 
LatestValidatorVotesForFrozenBanks::default(); + let num_validators = 10; + + let setup_dirty_set = + |latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks| { + (0..num_validators) + .flat_map(|vote_slot| { + let vote_pubkey = Pubkey::new_unique(); + let frozen_hash1 = Hash::new_unique(); + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + Some(frozen_hash1), + is_replay + ), + // This vote slot was frozen, and is the highest slot inserted thus far, + // so the highest vote should be Some(vote_slot) + (true, Some(vote_slot)) + ); + // Add a duplicate + let frozen_hash2 = Hash::new_unique(); + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + Some(frozen_hash2), + is_replay + ), + // This vote slot was frozen, and is for a duplicate version of the highest slot + // inserted thus far, so the highest vote should be Some(vote_slot). + (true, Some(vote_slot)) + ); + if is_replay { + // Only replayed vote should modify the dirty set, which is used for fork fork choice. 
+ vec![ + (vote_pubkey, (vote_slot, frozen_hash1)), + (vote_pubkey, (vote_slot, frozen_hash2)), + ] + } else { + vec![] + } + }) + .collect() + }; + + // Taking all the dirty votes >= 0 will return everything + let root = 0; + let mut expected_dirty_set: Vec<(Pubkey, SlotHashKey)> = + setup_dirty_set(&mut latest_validator_votes_for_frozen_banks); + let mut votes_dirty_set_output = + latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root); + votes_dirty_set_output.sort(); + expected_dirty_set.sort(); + assert_eq!(votes_dirty_set_output, expected_dirty_set); + assert!(latest_validator_votes_for_frozen_banks + .take_votes_dirty_set(0) + .is_empty()); + + // Taking all the dirty votes >= num_validators - 1 will only return the last vote + let root = num_validators - 1; + let dirty_set = setup_dirty_set(&mut latest_validator_votes_for_frozen_banks); + let mut expected_dirty_set: Vec<(Pubkey, SlotHashKey)> = + // dirty_set could be empty if `is_replay == false`, so use saturating_sub + dirty_set[dirty_set.len().saturating_sub(2)..dirty_set.len()].to_vec(); + let mut votes_dirty_set_output = + latest_validator_votes_for_frozen_banks.take_votes_dirty_set(root); + votes_dirty_set_output.sort(); + expected_dirty_set.sort(); + assert_eq!(votes_dirty_set_output, expected_dirty_set); + assert!(latest_validator_votes_for_frozen_banks + .take_votes_dirty_set(0) + .is_empty()); + } + + #[test] + fn test_latest_validator_votes_for_frozen_banks_take_votes_dirty_set_is_replay() { + run_test_latest_validator_votes_for_frozen_banks_take_votes_dirty_set(true) + } + + #[test] + fn test_latest_validator_votes_for_frozen_banks_take_votes_dirty_set_is_not_replay() { + run_test_latest_validator_votes_for_frozen_banks_take_votes_dirty_set(false) + } + + #[test] + fn test_latest_validator_votes_for_frozen_banks_add_replay_and_gossip_vote() { + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); + + // First simulate vote from gossip + 
let vote_pubkey = Pubkey::new_unique(); + let vote_slot = 1; + let frozen_hash = Hash::new_unique(); + let mut is_replay_vote = false; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + Some(frozen_hash), + is_replay_vote, + ), + (true, Some(vote_slot)) + ); + + // Should find the vote in the gossip votes + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash]) + ); + // Shouldn't find the vote in the replayed votes + assert!(latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, !is_replay_vote) + .is_none()); + assert!(latest_validator_votes_for_frozen_banks + .take_votes_dirty_set(0) + .is_empty()); + + // Next simulate vote from replay + is_replay_vote = true; + assert_eq!( + latest_validator_votes_for_frozen_banks.check_add_vote( + vote_pubkey, + vote_slot, + Some(frozen_hash), + is_replay_vote, + ), + (true, Some(vote_slot)) + ); + // Should find the vote in the gossip and replay votes + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash]) + ); + assert_eq!( + *latest_validator_votes_for_frozen_banks + .latest_vote(&vote_pubkey, !is_replay_vote) + .unwrap(), + (vote_slot, vec![frozen_hash]) + ); + assert_eq!( + latest_validator_votes_for_frozen_banks.take_votes_dirty_set(0), + vec![(vote_pubkey, (vote_slot, frozen_hash))] + ); + } +} diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index b149dda0a8..8b1a1fc203 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -48,10 +48,6 @@ impl LedgerCleanupService { compaction_interval: Option, max_compaction_jitter: Option, ) -> Self { - info!( - "LedgerCleanupService active. 
Max Ledger Slots {}", - max_ledger_shreds - ); let exit = exit.clone(); let mut last_purge_slot = 0; let mut last_compaction_slot = 0; @@ -60,6 +56,11 @@ impl LedgerCleanupService { let last_compact_slot = Arc::new(AtomicU64::new(0)); let last_compact_slot2 = last_compact_slot.clone(); + info!( + "LedgerCleanupService active. max ledger shreds={}, compaction interval={}", + max_ledger_shreds, compaction_interval, + ); + let exit_compact = exit.clone(); let blockstore_compact = blockstore.clone(); @@ -206,11 +207,25 @@ impl LedgerCleanupService { ); let mut purge_time = Measure::start("purge_slots"); + blockstore.purge_slots( purge_first_slot, lowest_cleanup_slot, - PurgeType::PrimaryIndex, + PurgeType::CompactionFilter, ); + // Update only after purge operation. + // Safety: This value can be used by compaction_filters shared via Arc. + // Compactions are async and run as a multi-threaded background job. However, this + // shouldn't cause consistency issues for iterators and getters because we have + // already expired all affected keys (older than or equal to lowest_cleanup_slot) + // by the above `purge_slots`. According to the general RocksDB design where SST + // files are immutable, even running iterators aren't affected; the database grabs + // a snapshot of the live set of sst files at iterator's creation. + // Also, we passed the PurgeType::CompactionFilter, meaning no delete_range for + // transaction_status and address_signatures CFs. These are fine because they + // don't require strong consistent view for their operation. 
+ blockstore.set_max_expired_slot(lowest_cleanup_slot); + purge_time.stop(); info!("{}", purge_time); diff --git a/core/src/lib.rs b/core/src/lib.rs index c7b2ffa89c..cdf542c17b 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -11,7 +11,7 @@ pub mod accounts_hash_verifier; pub mod banking_stage; pub mod bigtable_upload_service; pub mod broadcast_stage; -pub mod cache_block_time_service; +pub mod cache_block_meta_service; pub mod cluster_info_vote_listener; pub mod commitment_service; pub mod completed_data_sets_service; @@ -22,6 +22,8 @@ pub mod shred_fetch_stage; #[macro_use] pub mod contact_info; pub mod cluster_info; +mod cluster_info_metrics; +pub mod cluster_slot_state_verifier; pub mod cluster_slots; pub mod cluster_slots_service; pub mod consensus; @@ -42,6 +44,7 @@ pub mod fork_choice; pub mod gen_keys; pub mod gossip_service; pub mod heaviest_subtree_fork_choice; +pub mod latest_validator_votes_for_frozen_banks; pub mod ledger_cleanup_service; pub mod non_circulating_supply; pub mod optimistic_confirmation_verifier; @@ -78,6 +81,7 @@ pub mod tpu; pub mod transaction_status_service; pub mod tree_diff; pub mod tvu; +pub mod unfrozen_gossip_verified_vote_hashes; pub mod validator; pub mod verified_vote_packets; pub mod vote_stake_tracker; diff --git a/core/src/non_circulating_supply.rs b/core/src/non_circulating_supply.rs index 739a5af810..ae29ab8f0f 100644 --- a/core/src/non_circulating_supply.rs +++ b/core/src/non_circulating_supply.rs @@ -40,7 +40,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc) -> NonCirculatingSuppl bank.get_program_accounts(&solana_stake_program::id()) }; for (pubkey, account) in stake_accounts.iter() { - let stake_account = StakeState::from(&account).unwrap_or_default(); + let stake_account = StakeState::from(account).unwrap_or_default(); match stake_account { StakeState::Initialized(meta) => { if meta.lockup.is_in_force(&clock, None) @@ -119,6 +119,52 @@ solana_sdk::pubkeys!( 
"CUageMFi49kzoDqtdU8NvQ4Bq3sbtJygjKDAXJ45nmAi", "5smrYwb1Hr2T8XMnvsqccTgXxuqQs14iuE8RbHFYf2Cf", "xQadXQiUTCCFhfHjvQx1hyJK6KVWr1w2fD6DT3cdwj7", + "8DE8fqPfv1fp9DHyGyDFFaMjpopMgDeXspzoi9jpBJjC", + "3itU5ME8L6FDqtMiRoUiT1F7PwbkTtHBbW51YWD5jtjm", + "AsrYX4FeLXnZcrjcZmrASY2Eq1jvEeQfwxtNTxS5zojA", + "8rT45mqpuDBR1vcnDc9kwP9DrZAXDR4ZeuKWw3u1gTGa", + "nGME7HgBT6tAJN1f6YuCCngpqT5cvSTndZUVLjQ4jwA", + "CzAHrrrHKx9Lxf6wdCMrsZkLvk74c7J2vGv8VYPUmY6v", + "AzHQ8Bia1grVVbcGyci7wzueSWkgvu7YZVZ4B9rkL5P6", + "FiWYY85b58zEEcPtxe3PuqzWPjqBJXqdwgZeqSBmT9Cn", + "GpxpMVhrBBBEYbEJxdR62w3daWz444V7m6dxYDZKH77D", + "3bTGcGB9F98XxnrBNftmmm48JGfPgi5sYxDEKiCjQYk3", + "8pNBEppa1VcFAsx4Hzq9CpdXUXZjUXbvQwLX2K7QsCwb", + "HKJgYGTTYYR2ZkfJKHbn58w676fKueQXmvbtpyvrSM3N", + "3jnknRabs7G2V9dKhxd2KP85pNWXKXiedYnYxtySnQMs", + "4sxwau4mdqZ8zEJsfryXq4QFYnMJSCp3HWuZQod8WU5k", + "Fg12tB1tz8w6zJSQ4ZAGotWoCztdMJF9hqK8R11pakog", + "GEWSkfWgHkpiLbeKaAnwvqnECGdRNf49at5nFccVey7c", + "CND6ZjRTzaCFVdX7pSSWgjTfHZuhxqFDoUBqWBJguNoA", + "2WWb1gRzuXDd5viZLQF7pNRR6Y7UiyeaPpaL35X6j3ve", + "BUnRE27mYXN9p8H1Ay24GXhJC88q2CuwLoNU2v2CrW4W", + "CsUqV42gVQLJwQsKyjWHqGkfHarxn9hcY4YeSjgaaeTd", + "5khMKAcvmsFaAhoKkdg3u5abvKsmjUQNmhTNP624WB1F", + "GpYnVDgB7dzvwSgsjQFeHznjG6Kt1DLBFYrKxjGU1LuD", + "DQQGPtj7pphPHCLzzBuEyDDQByUcKGrsJdsH7SP3hAug", + "FwfaykN7ACnsEUDHANzGHqTGQZMcGnUSsahAHUqbdPrz", + "JCwT5Ygmq3VeBEbDjL8s8E82Ra2rP9bq45QfZE7Xyaq7", + "H3Ni7vG1CsmJZdTvxF7RkAf9UM5qk4RsohJsmPvtZNnu", + "CVgyXrbEd1ctEuvq11QdpnCQVnPit8NLdhyqXQHLprM2", + "EAJJD6nDqtXcZ4DnQb19F9XEz8y8bRDHxbWbahatZNbL", + "6o5v1HC7WhBnLfRHp8mQTtCP2khdXXjhuyGyYEoy2Suy", + "3ZrsTmNM6AkMcqFfv3ryfhQ2jMfqP64RQbqVyAaxqhrQ", + "6zw7em7uQdmMpuS9fGz8Nq9TLHa5YQhEKKwPjo5PwDK4", + "CuatS6njAcfkFHnvai7zXCs7syA9bykXWsDCJEWfhjHG", + "Hz9nydgN1k15wnwffKX7CSmZp4VFTnTwLXAEdomFGNXy", + "Ep5Y58PaSyALPrdFxDVAdfKtVdP55vApvsWjb3jSmXsG", + "EziVYi3Sv5kJWxmU77PnbrT8jmkVuqwdiFLLzZpLVEn7", + "H1rt8KvXkNhQExTRfkY8r9wjZbZ8yCih6J4wQ5Fz9HGP", + "6nN69B4uZuESZYxr9nrLDjmKRtjDZQXrehwkfQTKw62U", + 
"Hm9JW7of5i9dnrboS8pCUCSeoQUPh7JsP1rkbJnW7An4", + "5D5NxsNVTgXHyVziwV7mDFwVDS6voaBsyyGxUbhQrhNW", + "EMAY24PrS6rWfvpqffFCsTsFJypeeYYmtUc26wdh3Wup", + "Br3aeVGapRb2xTq17RU2pYZCoJpWA7bq6TKBCcYtMSmt", + "BUjkdqUuH5Lz9XzcMcR4DdEMnFG6r8QzUMBm16Rfau96", + "Es13uD2p64UVPFpEWfDtd6SERdoNR2XVgqBQBZcZSLqW", + "AVYpwVou2BhdLivAwLxKPALZQsY7aZNkNmGbP2fZw7RU", + "DrKzW5koKSZp4mg4BdHLwr72MMXscd2kTiWgckCvvPXz", + "9hknftBZAQL4f48tWfk3bUEV5YSLcYYtDRqNmpNnhCWG", ] ); @@ -130,6 +176,11 @@ solana_sdk::pubkeys!( "3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB", "FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM", "4e6KwQpyzGQPfgVr5Jn3g5jLjbXB4pKPa2jRLohEb1QA", + "FjiEiVKyMGzSLpqoB27QypukUfyWHrwzPcGNtopzZVdh", + "DwbVjia1mYeSGoJipzhaf4L5hfer2DJ1Ys681VzQm5YY", + "GeMGyvsTEsANVvcT5cme65Xq5MVU8fVVzMQ13KAZFNS2", + "Bj3aQ2oFnZYfNR1njzRjmWizzuhvfcYLckh76cqsbuBM", + "4ZJhPQAgUseCsWhKvJLTmmRRUV74fdoTpQLNfKoekbPY", ] ); @@ -138,6 +189,7 @@ mod tests { use super::*; use solana_sdk::{ account::Account, + account::AccountSharedData, epoch_schedule::EpochSchedule, genesis_config::{ClusterType, GenesisConfig}, }; @@ -214,7 +266,10 @@ mod tests { bank = Arc::new(new_from_parent(&bank)); let new_balance = 11; for key in non_circulating_accounts { - bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default())); + bank.store_account( + &key, + &AccountSharedData::new(new_balance, 0, &Pubkey::default()), + ); } let non_circulating_supply = calculate_non_circulating_supply(&bank); assert_eq!( diff --git a/core/src/ping_pong.rs b/core/src/ping_pong.rs index c1ad136a0d..254ced3e67 100644 --- a/core/src/ping_pong.rs +++ b/core/src/ping_pong.rs @@ -243,6 +243,11 @@ impl PingCache { } clone } + + /// Only for tests and simulations. 
+ pub fn mock_pong(&mut self, node: Pubkey, socket: SocketAddr, now: Instant) { + self.pongs.put((node, socket), now); + } } #[cfg(test)] diff --git a/core/src/poh_recorder.rs b/core/src/poh_recorder.rs index abd140b9f5..b6fdac5fe0 100644 --- a/core/src/poh_recorder.rs +++ b/core/src/poh_recorder.rs @@ -10,6 +10,10 @@ //! For Entries: //! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height //! +use crate::poh_service::PohService; +use crossbeam_channel::{ + unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender, +}; use solana_ledger::blockstore::Blockstore; use solana_ledger::entry::Entry; use solana_ledger::leader_schedule_cache::LeaderScheduleCache; @@ -23,9 +27,12 @@ use solana_sdk::pubkey::Pubkey; use solana_sdk::timing; use solana_sdk::transaction::Transaction; use std::cmp; -use std::sync::mpsc::{channel, Receiver, SendError, Sender, SyncSender}; -use std::sync::{Arc, Mutex}; -use std::time::Instant; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + mpsc::{channel, Receiver, SendError, Sender, SyncSender}, + {Arc, Mutex}, +}; +use std::time::{Duration, Instant}; use thiserror::Error; pub const GRACE_TICKS_FACTOR: u64 = 2; @@ -51,6 +58,90 @@ type Result = std::result::Result; pub type WorkingBankEntry = (Arc, (Entry, u64)); pub type BankStart = (Arc, Arc); +pub struct Record { + pub mixin: Hash, + pub transactions: Vec, + pub slot: Slot, + pub sender: CrossbeamSender>, +} +impl Record { + pub fn new( + mixin: Hash, + transactions: Vec, + slot: Slot, + sender: CrossbeamSender>, + ) -> Self { + Self { + mixin, + transactions, + slot, + sender, + } + } +} + +pub struct TransactionRecorder { + // shared by all users of PohRecorder + pub record_sender: CrossbeamSender, + pub is_exited: Arc, +} + +impl Clone for TransactionRecorder { + fn clone(&self) -> Self { + TransactionRecorder::new(self.record_sender.clone(), self.is_exited.clone()) + } +} + +impl TransactionRecorder { + 
pub fn new(record_sender: CrossbeamSender, is_exited: Arc) -> Self { + Self { + // shared + record_sender, + // shared + is_exited, + } + } + pub fn record( + &self, + bank_slot: Slot, + mixin: Hash, + transactions: Vec, + ) -> Result<()> { + // create a new channel so that there is only 1 sender and when it goes out of scope, the receiver fails + let (result_sender, result_receiver) = unbounded(); + let res = + self.record_sender + .send(Record::new(mixin, transactions, bank_slot, result_sender)); + if res.is_err() { + // If the channel is dropped, then the validator is shutting down so return that we are hitting + // the max tick height to stop transaction processing and flush any transactions in the pipeline. + return Err(PohRecorderError::MaxHeightReached); + } + // Besides validator exit, this timeout should primarily be seen to affect test execution environments where the various pieces can be shutdown abruptly + let mut is_exited = false; + loop { + let res = result_receiver.recv_timeout(Duration::from_millis(1000)); + match res { + Err(RecvTimeoutError::Timeout) => { + if is_exited { + return Err(PohRecorderError::MaxHeightReached); + } else { + // A result may have come in between when we timed out checking this + // bool, so check the channel again, even if is_exited == true + is_exited = self.is_exited.load(Ordering::SeqCst); + } + } + Err(RecvTimeoutError::Disconnected) => { + return Err(PohRecorderError::MaxHeightReached); + } + Ok(result) => { + return result; + } + } + } + } +} + #[derive(Clone)] pub struct WorkingBank { pub bank: Arc, @@ -76,10 +167,20 @@ pub struct PohRecorder { leader_schedule_cache: Arc, poh_config: Arc, ticks_per_slot: u64, + target_ns_per_tick: u64, record_lock_contention_us: u64, + flush_cache_no_tick_us: u64, + flush_cache_tick_us: u64, + prepare_send_us: u64, + send_us: u64, tick_lock_contention_us: u64, tick_overhead_us: u64, + total_sleep_us: u64, record_us: u64, + ticks_from_record: u64, + last_metric: Instant, + 
record_sender: CrossbeamSender, + pub is_exited: Arc, } impl PohRecorder { @@ -156,6 +257,10 @@ impl PohRecorder { self.ticks_per_slot } + pub fn recorder(&self) -> TransactionRecorder { + TransactionRecorder::new(self.record_sender.clone(), self.is_exited.clone()) + } + fn is_same_fork_as_previous_leader(&self, slot: Slot) -> bool { (slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|slot| { // Check if the last slot Poh reset to was any of the @@ -190,6 +295,10 @@ impl PohRecorder { || !self.is_same_fork_as_previous_leader(current_slot))) } + pub fn last_reset_slot(&self) -> Slot { + self.start_slot + } + /// returns if leader slot has been reached, how many grace ticks were afforded, /// imputed leader_slot and self.start_slot /// reached_leader_slot() == true means "ready for a bank" @@ -262,14 +371,15 @@ impl PohRecorder { ) { self.clear_bank(); let mut cache = vec![]; - { + let poh_hash = { let mut poh = self.poh.lock().unwrap(); - info!( - "reset poh from: {},{},{} to: {},{}", - poh.hash, self.tick_height, self.start_slot, blockhash, start_slot - ); poh.reset(blockhash, self.poh_config.hashes_per_tick); - } + poh.hash + }; + info!( + "reset poh from: {},{},{} to: {},{}", + poh_hash, self.tick_height, self.start_slot, blockhash, start_slot + ); std::mem::swap(&mut cache, &mut self.tick_cache); @@ -371,7 +481,17 @@ impl PohRecorder { pub fn tick(&mut self) { let now = Instant::now(); - let poh_entry = self.poh.lock().unwrap().tick(); + let (poh_entry, target_time) = { + let mut poh_l = self.poh.lock().unwrap(); + let poh_entry = poh_l.tick(); + let target_time = if poh_entry.is_some() { + Some(poh_l.target_poh_time(self.target_ns_per_tick)) + } else { + None + }; + + (poh_entry, target_time) + }; self.tick_lock_contention_us += timing::duration_as_us(&now.elapsed()); let now = Instant::now(); if let Some(poh_entry) = poh_entry { @@ -388,31 +508,57 @@ impl PohRecorder { hash: poh_entry.hash, transactions: vec![], }; + self.tick_overhead_us += 
timing::duration_as_us(&now.elapsed()); + let now = Instant::now(); self.tick_cache.push((entry, self.tick_height)); let _ = self.flush_cache(true); + self.flush_cache_tick_us += timing::duration_as_us(&now.elapsed()); + let target_time = target_time.unwrap(); + // sleep is not accurate enough to get a predictable time. + // Kernel can not schedule the thread for a while. + let started_waiting = Instant::now(); + while Instant::now() < target_time { + // TODO: a caller could possibly desire to reset or record while we're spinning here + std::hint::spin_loop(); + } + self.total_sleep_us += started_waiting.elapsed().as_nanos() as u64 / 1000; } - self.tick_overhead_us += timing::duration_as_us(&now.elapsed()); } fn report_metrics(&mut self, bank_slot: Slot) { - datapoint_info!( - "poh_recorder", - ("slot", bank_slot, i64), - ("tick_lock_contention", self.tick_lock_contention_us, i64), - ("record_us", self.record_us, i64), - ("tick_overhead", self.tick_overhead_us, i64), - ( - "record_lock_contention", - self.record_lock_contention_us, - i64 - ), - ); + if self.last_metric.elapsed().as_millis() > 1000 { + datapoint_info!( + "poh_recorder", + ("slot", bank_slot, i64), + ("tick_lock_contention", self.tick_lock_contention_us, i64), + ("record_us", self.record_us, i64), + ("flush_cache_no_tick_us", self.flush_cache_no_tick_us, i64), + ("flush_cache_tick_us", self.flush_cache_tick_us, i64), + ("prepare_send_us", self.prepare_send_us, i64), + ("send_us", self.send_us, i64), + ("ticks_from_record", self.ticks_from_record, i64), + ("total_sleep_us", self.total_sleep_us, i64), + ("tick_overhead", self.tick_overhead_us, i64), + ( + "record_lock_contention", + self.record_lock_contention_us, + i64 + ), + ); - self.tick_lock_contention_us = 0; - self.record_us = 0; - self.tick_overhead_us = 0; - self.record_lock_contention_us = 0; + self.tick_lock_contention_us = 0; + self.record_us = 0; + self.tick_overhead_us = 0; + self.total_sleep_us = 0; + self.record_lock_contention_us = 0; 
+ self.flush_cache_no_tick_us = 0; + self.flush_cache_tick_us = 0; + self.prepare_send_us = 0; + self.send_us = 0; + self.ticks_from_record = 0; + self.last_metric = Instant::now(); + } } pub fn record( @@ -424,15 +570,17 @@ impl PohRecorder { // Entries without transactions are used to track real-time passing in the ledger and // cannot be generated by `record()` assert!(!transactions.is_empty(), "No transactions provided"); + self.report_metrics(bank_slot); loop { + let now = Instant::now(); self.flush_cache(false)?; + self.flush_cache_no_tick_us += timing::duration_as_us(&now.elapsed()); let working_bank = self .working_bank .as_ref() .ok_or(PohRecorderError::MaxHeightReached)?; if bank_slot != working_bank.bank.slot() { - self.report_metrics(bank_slot); return Err(PohRecorderError::MaxHeightReached); } @@ -443,20 +591,26 @@ impl PohRecorder { self.record_lock_contention_us += timing::duration_as_us(&now.elapsed()); let now = Instant::now(); let res = poh_lock.record(mixin); + drop(poh_lock); self.record_us += timing::duration_as_us(&now.elapsed()); + let now = Instant::now(); if let Some(poh_entry) = res { let entry = Entry { num_hashes: poh_entry.num_hashes, hash: poh_entry.hash, transactions, }; - self.sender - .send((working_bank.bank.clone(), (entry, self.tick_height)))?; + let bank_clone = working_bank.bank.clone(); + self.prepare_send_us += timing::duration_as_us(&now.elapsed()); + let now = Instant::now(); + self.sender.send((bank_clone, (entry, self.tick_height)))?; + self.send_us += timing::duration_as_us(&now.elapsed()); return Ok(()); } } // record() might fail if the next PoH hash needs to be a tick. 
But that's ok, tick() // and re-record() + self.ticks_from_record += 1; self.tick(); } } @@ -473,12 +627,22 @@ impl PohRecorder { clear_bank_signal: Option>, leader_schedule_cache: &Arc, poh_config: &Arc, - ) -> (Self, Receiver) { - let poh = Arc::new(Mutex::new(Poh::new( + is_exited: Arc, + ) -> (Self, Receiver, CrossbeamReceiver) { + let tick_number = 0; + let poh = Arc::new(Mutex::new(Poh::new_with_slot_info( last_entry_hash, poh_config.hashes_per_tick, + ticks_per_slot, + tick_number, ))); + + let target_ns_per_tick = PohService::target_ns_per_tick( + ticks_per_slot, + poh_config.target_tick_duration.as_nanos() as u64, + ); let (sender, receiver) = channel(); + let (record_sender, record_receiver) = unbounded(); let (leader_first_tick_height, leader_last_tick_height, grace_ticks) = Self::compute_leader_slot_tick_heights(next_leader_slot, ticks_per_slot); ( @@ -498,19 +662,31 @@ impl PohRecorder { blockstore: blockstore.clone(), leader_schedule_cache: leader_schedule_cache.clone(), ticks_per_slot, + target_ns_per_tick, poh_config: poh_config.clone(), record_lock_contention_us: 0, + flush_cache_tick_us: 0, + flush_cache_no_tick_us: 0, + prepare_send_us: 0, + send_us: 0, tick_lock_contention_us: 0, record_us: 0, tick_overhead_us: 0, + total_sleep_us: 0, + ticks_from_record: 0, + last_metric: Instant::now(), + record_sender, + is_exited, }, receiver, + record_receiver, ) } /// A recorder to synchronize PoH with the following data structures /// * bank - the LastId's queue is updated on `tick` and `record` events /// * sender - the Entry channel that outputs to the ledger + #[allow(clippy::too_many_arguments)] pub fn new( tick_height: u64, last_entry_hash: Hash, @@ -521,7 +697,8 @@ impl PohRecorder { blockstore: &Arc, leader_schedule_cache: &Arc, poh_config: &Arc, - ) -> (Self, Receiver) { + is_exited: Arc, + ) -> (Self, Receiver, CrossbeamReceiver) { Self::new_with_clear_signal( tick_height, last_entry_hash, @@ -533,6 +710,7 @@ impl PohRecorder { None, 
leader_schedule_cache, poh_config, + is_exited, ) } @@ -573,7 +751,7 @@ mod tests { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -583,6 +761,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); assert_eq!(poh_recorder.tick_cache.len(), 1); @@ -600,7 +779,7 @@ mod tests { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -610,6 +789,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); poh_recorder.tick(); @@ -626,7 +806,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, Hash::default(), 0, @@ -636,6 +816,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); assert_eq!(poh_recorder.tick_cache.len(), 1); @@ -654,7 +835,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -664,6 +845,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -690,7 +872,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -700,6 +882,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -741,7 +924,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -751,6 +934,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); @@ -790,7 +974,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -800,6 +984,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -828,7 +1013,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -838,6 +1023,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -870,7 +1056,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -880,6 +1066,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -916,7 +1103,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -926,6 +1113,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -960,7 +1148,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, entry_receiver) = PohRecorder::new( + let (mut poh_recorder, entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -970,6 +1158,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); @@ -997,7 +1186,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, Hash::default(), 0, @@ -1007,6 +1196,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); poh_recorder.tick(); @@ -1024,7 +1214,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, Hash::default(), 0, @@ -1034,6 +1224,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), 
&Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); poh_recorder.tick(); @@ -1052,7 +1243,7 @@ mod tests { { let blockstore = Blockstore::open(&ledger_path) .expect("Expected to be able to open database ledger"); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, Hash::default(), 0, @@ -1062,6 +1253,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::default()), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); poh_recorder.tick(); poh_recorder.tick(); @@ -1085,7 +1277,7 @@ mod tests { .expect("Expected to be able to open database ledger"); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, Hash::default(), 0, @@ -1095,6 +1287,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let start = Arc::new(Instant::now()); let working_bank = WorkingBank { @@ -1119,18 +1312,20 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let (sender, receiver) = sync_channel(1); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new_with_clear_signal( - 0, - Hash::default(), - 0, - None, - bank.ticks_per_slot(), - &Pubkey::default(), - &Arc::new(blockstore), - Some(sender), - &Arc::new(LeaderScheduleCache::default()), - &Arc::new(PohConfig::default()), - ); + let (mut poh_recorder, _entry_receiver, _record_receiver) = + PohRecorder::new_with_clear_signal( + 0, + Hash::default(), + 0, + None, + bank.ticks_per_slot(), + &Pubkey::default(), + &Arc::new(blockstore), + Some(sender), + &Arc::new(LeaderScheduleCache::default()), + &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), + ); poh_recorder.set_bank(&bank); poh_recorder.clear_bank(); assert!(receiver.try_recv().is_ok()); @@ -1153,7 +1348,7 @@ mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -1163,6 +1358,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let end_slot = 3; @@ -1202,7 +1398,7 @@ mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -1212,6 +1408,7 @@ mod tests { &Arc::new(blockstore), &leader_schedule_cache, &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let bootstrap_validator_id = leader_schedule_cache.slot_leader_at(0, None).unwrap(); @@ -1264,7 +1461,7 @@ mod tests { let GenesisConfigInfo { 
genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -1274,6 +1471,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); // Test that with no next leader slot, we don't reach the leader slot @@ -1393,7 +1591,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); let bank = Arc::new(Bank::new(&genesis_config)); let prev_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, 0, @@ -1403,6 +1601,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); // Test that with no leader slot, we don't reach the leader tick @@ -1461,7 +1660,7 @@ mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let genesis_hash = bank.last_blockhash(); - let (mut poh_recorder, _entry_receiver) = PohRecorder::new( + let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, bank.last_blockhash(), 0, @@ -1471,6 +1670,7 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); //create a new bank let bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 2)); diff --git a/core/src/poh_service.rs b/core/src/poh_service.rs index 3bc03ff423..c72bc6b669 100644 --- a/core/src/poh_service.rs +++ b/core/src/poh_service.rs @@ -1,11 +1,14 @@ //! The `poh_service` module implements a service that records the passing of //! 
"ticks", a measure of time in the PoH stream -use crate::poh_recorder::PohRecorder; +use crate::poh_recorder::{PohRecorder, Record}; +use crossbeam_channel::Receiver; +use solana_ledger::poh::Poh; +use solana_measure::measure::Measure; use solana_sdk::poh_config::PohConfig; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread::{self, sleep, Builder, JoinHandle}; -use std::time::Instant; +use std::time::{Duration, Instant}; pub struct PohService { tick_producer: JoinHandle<()>, @@ -23,6 +26,58 @@ pub const DEFAULT_PINNED_CPU_CORE: usize = 0; const TARGET_SLOT_ADJUSTMENT_NS: u64 = 50_000_000; +#[derive(Debug)] +struct PohTiming { + num_ticks: u64, + num_hashes: u64, + total_sleep_us: u64, + total_lock_time_ns: u64, + total_hash_time_ns: u64, + total_tick_time_ns: u64, + last_metric: Instant, + total_record_time_us: u64, +} + +impl PohTiming { + fn new() -> Self { + Self { + num_ticks: 0, + num_hashes: 0, + total_sleep_us: 0, + total_lock_time_ns: 0, + total_hash_time_ns: 0, + total_tick_time_ns: 0, + last_metric: Instant::now(), + total_record_time_us: 0, + } + } + fn report(&mut self, ticks_per_slot: u64) { + if self.last_metric.elapsed().as_millis() > 1000 { + let elapsed_us = self.last_metric.elapsed().as_micros() as u64; + let us_per_slot = (elapsed_us * ticks_per_slot) / self.num_ticks; + datapoint_info!( + "poh-service", + ("ticks", self.num_ticks as i64, i64), + ("hashes", self.num_hashes as i64, i64), + ("elapsed_us", us_per_slot, i64), + ("total_sleep_us", self.total_sleep_us, i64), + ("total_tick_time_us", self.total_tick_time_ns / 1000, i64), + ("total_lock_time_us", self.total_lock_time_ns / 1000, i64), + ("total_hash_time_us", self.total_hash_time_ns / 1000, i64), + ("total_record_time_us", self.total_record_time_us, i64), + ); + self.total_sleep_us = 0; + self.num_ticks = 0; + self.num_hashes = 0; + self.total_tick_time_ns = 0; + self.total_lock_time_ns = 0; + self.total_hash_time_ns = 0; + self.last_metric = 
Instant::now(); + self.total_record_time_us = 0; + } + } +} + impl PohService { pub fn new( poh_recorder: Arc>, @@ -31,6 +86,7 @@ impl PohService { ticks_per_slot: u64, pinned_cpu_core: usize, hashes_per_batch: u64, + record_receiver: Receiver, ) -> Self { let poh_exit_ = poh_exit.clone(); let poh_config = poh_config.clone(); @@ -40,12 +96,18 @@ impl PohService { solana_sys_tuner::request_realtime_poh(); if poh_config.hashes_per_tick.is_none() { if poh_config.target_tick_count.is_none() { - Self::sleepy_tick_producer(poh_recorder, &poh_config, &poh_exit_); + Self::sleepy_tick_producer( + poh_recorder, + &poh_config, + &poh_exit_, + record_receiver, + ); } else { Self::short_lived_sleepy_tick_producer( poh_recorder, &poh_config, &poh_exit_, + record_receiver, ); } } else { @@ -55,19 +117,16 @@ impl PohService { if let Some(cores) = core_affinity::get_core_ids() { core_affinity::set_for_current(cores[pinned_cpu_core]); } - // Account for some extra time outside of PoH generation to account - // for processing time outside PoH. - let adjustment_per_tick = if ticks_per_slot > 0 { - TARGET_SLOT_ADJUSTMENT_NS / ticks_per_slot - } else { - 0 - }; Self::tick_producer( poh_recorder, &poh_exit_, - poh_config.target_tick_duration.as_nanos() as u64 - adjustment_per_tick, ticks_per_slot, hashes_per_batch, + record_receiver, + Self::target_ns_per_tick( + ticks_per_slot, + poh_config.target_tick_duration.as_nanos() as u64, + ), ); } poh_exit_.store(true, Ordering::Relaxed); @@ -77,24 +136,68 @@ impl PohService { Self { tick_producer } } + pub fn target_ns_per_tick(ticks_per_slot: u64, target_tick_duration_ns: u64) -> u64 { + // Account for some extra time outside of PoH generation to account + // for processing time outside PoH. 
+ let adjustment_per_tick = if ticks_per_slot > 0 { + TARGET_SLOT_ADJUSTMENT_NS / ticks_per_slot + } else { + 0 + }; + target_tick_duration_ns.saturating_sub(adjustment_per_tick) + } + fn sleepy_tick_producer( poh_recorder: Arc>, poh_config: &PohConfig, poh_exit: &AtomicBool, + record_receiver: Receiver, ) { while !poh_exit.load(Ordering::Relaxed) { + Self::read_record_receiver_and_process( + &poh_recorder, + &record_receiver, + Duration::from_millis(0), + ); sleep(poh_config.target_tick_duration); poh_recorder.lock().unwrap().tick(); } } + pub fn read_record_receiver_and_process( + poh_recorder: &Arc>, + record_receiver: &Receiver, + timeout: Duration, + ) { + let record = record_receiver.recv_timeout(timeout); + if let Ok(record) = record { + if record + .sender + .send(poh_recorder.lock().unwrap().record( + record.slot, + record.mixin, + record.transactions, + )) + .is_err() + { + panic!("Error returning mixin hash"); + } + } + } + fn short_lived_sleepy_tick_producer( poh_recorder: Arc>, poh_config: &PohConfig, poh_exit: &AtomicBool, + record_receiver: Receiver, ) { let mut warned = false; for _ in 0..poh_config.target_tick_count.unwrap() { + Self::read_record_receiver_and_process( + &poh_recorder, + &record_receiver, + Duration::from_millis(0), + ); sleep(poh_config.target_tick_duration); poh_recorder.lock().unwrap().tick(); if poh_exit.load(Ordering::Relaxed) && !warned { @@ -104,49 +207,133 @@ impl PohService { } } + // returns true if we need to tick + fn record_or_hash( + next_record: &mut Option, + poh_recorder: &Arc>, + timing: &mut PohTiming, + record_receiver: &Receiver, + hashes_per_batch: u64, + poh: &Arc>, + target_ns_per_tick: u64, + ) -> bool { + match next_record.take() { + Some(mut record) => { + // received message to record + // so, record for as long as we have queued up record requests + let mut lock_time = Measure::start("lock"); + let mut poh_recorder_l = poh_recorder.lock().unwrap(); + lock_time.stop(); + timing.total_lock_time_ns += 
lock_time.as_ns(); + let mut record_time = Measure::start("record"); + loop { + let res = poh_recorder_l.record( + record.slot, + record.mixin, + std::mem::take(&mut record.transactions), + ); + let _ = record.sender.send(res); // what do we do on failure here? Ignore for now. + timing.num_hashes += 1; // note: may have also ticked inside record + + let new_record_result = record_receiver.try_recv(); + match new_record_result { + Ok(new_record) => { + // we already have second request to record, so record again while we still have the mutex + record = new_record; + } + Err(_) => { + break; + } + } + } + record_time.stop(); + timing.total_record_time_us += record_time.as_us(); + // PohRecorder.record would have ticked if it needed to, so should_tick will be false + } + None => { + // did not receive instructions to record, so hash until we notice we've been asked to record (or we need to tick) and then remember what to record + let mut lock_time = Measure::start("lock"); + let mut poh_l = poh.lock().unwrap(); + lock_time.stop(); + timing.total_lock_time_ns += lock_time.as_ns(); + loop { + timing.num_hashes += hashes_per_batch; + let mut hash_time = Measure::start("hash"); + let should_tick = poh_l.hash(hashes_per_batch); + let ideal_time = poh_l.target_poh_time(target_ns_per_tick); + hash_time.stop(); + timing.total_hash_time_ns += hash_time.as_ns(); + if should_tick { + // nothing else can be done. tick required. + return true; + } + // check to see if a record request has been sent + if let Ok(record) = record_receiver.try_recv() { + // remember the record we just received as the next record to occur + *next_record = Some(record); + break; + } + // check to see if we need to wait to catch up to ideal + let wait_start = Instant::now(); + if ideal_time <= wait_start { + // no, keep hashing. We still hold the lock. 
+ continue; + } + + // busy wait, polling for new records and after dropping poh lock (reset can occur, for example) + drop(poh_l); + while ideal_time > Instant::now() { + // check to see if a record request has been sent + if let Ok(record) = record_receiver.try_recv() { + // remember the record we just received as the next record to occur + *next_record = Some(record); + break; + } + } + timing.total_sleep_us += wait_start.elapsed().as_micros() as u64; + break; + } + } + }; + false // should_tick = false for all code that reaches here + } + fn tick_producer( poh_recorder: Arc>, poh_exit: &AtomicBool, - target_tick_ns: u64, ticks_per_slot: u64, hashes_per_batch: u64, + record_receiver: Receiver, + target_ns_per_tick: u64, ) { let poh = poh_recorder.lock().unwrap().poh.clone(); - let mut now = Instant::now(); - let mut last_metric = Instant::now(); - let mut num_ticks = 0; - let mut num_hashes = 0; - let mut total_sleep_us = 0; + let mut timing = PohTiming::new(); + let mut next_record = None; loop { - num_hashes += hashes_per_batch; - if poh.lock().unwrap().hash(hashes_per_batch) { - // Lock PohRecorder only for the final hash... - poh_recorder.lock().unwrap().tick(); - num_ticks += 1; - let elapsed_ns = now.elapsed().as_nanos() as u64; - // sleep is not accurate enough to get a predictable time. - // Kernel can not schedule the thread for a while. 
- while (now.elapsed().as_nanos() as u64) < target_tick_ns { - std::hint::spin_loop(); - } - total_sleep_us += (now.elapsed().as_nanos() as u64 - elapsed_ns) / 1000; - now = Instant::now(); - - if last_metric.elapsed().as_millis() > 1000 { - let elapsed_ms = last_metric.elapsed().as_millis() as u64; - let ms_per_slot = (elapsed_ms * ticks_per_slot) / num_ticks; - datapoint_info!( - "poh-service", - ("ticks", num_ticks as i64, i64), - ("hashes", num_hashes as i64, i64), - ("elapsed_ms", ms_per_slot, i64), - ("total_sleep_ms", total_sleep_us / 1000, i64), - ); - total_sleep_us = 0; - num_ticks = 0; - num_hashes = 0; - last_metric = Instant::now(); + let should_tick = Self::record_or_hash( + &mut next_record, + &poh_recorder, + &mut timing, + &record_receiver, + hashes_per_batch, + &poh, + target_ns_per_tick, + ); + if should_tick { + // Lock PohRecorder only for the final hash. record_or_hash will lock PohRecorder for record calls but not for hashing. + { + let mut lock_time = Measure::start("lock"); + let mut poh_recorder_l = poh_recorder.lock().unwrap(); + lock_time.stop(); + timing.total_lock_time_ns += lock_time.as_ns(); + let mut tick_time = Measure::start("tick"); + poh_recorder_l.tick(); + tick_time.stop(); + timing.total_tick_time_ns += tick_time.as_ns(); } + timing.num_ticks += 1; + + timing.report(ticks_per_slot); if poh_exit.load(Ordering::Relaxed) { break; } @@ -177,6 +364,7 @@ mod tests { use std::time::Duration; #[test] + #[ignore] fn test_poh_service() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); @@ -195,7 +383,9 @@ mod tests { target_tick_duration, target_tick_count: None, }); - let (poh_recorder, entry_receiver) = PohRecorder::new( + let exit = Arc::new(AtomicBool::new(false)); + + let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), prev_hash, bank.slot(), @@ -205,9 +395,9 @@ mod tests { &Arc::new(blockstore), &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &poh_config, + exit.clone(), ); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); - let exit = Arc::new(AtomicBool::new(false)); let start = Arc::new(Instant::now()); let working_bank = WorkingBank { bank: bank.clone(), @@ -275,6 +465,7 @@ mod tests { 0, DEFAULT_PINNED_CPU_CORE, hashes_per_batch, + record_receiver, ); poh_recorder.lock().unwrap().set_working_bank(working_bank); diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index 35d5ecec48..0682d431dc 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -63,6 +63,46 @@ impl ReplaySlotStats { ("load_us", self.execute_timings.load_us, i64), ("execute_us", self.execute_timings.execute_us, i64), ("store_us", self.execute_timings.store_us, i64), + ( + "serialize_us", + self.execute_timings.details.serialize_us, + i64 + ), + ( + "create_vm_us", + self.execute_timings.details.create_vm_us, + i64 + ), + ( + "execute_inner_us", + self.execute_timings.details.execute_us, + i64 + ), + ( + "deserialize_us", + self.execute_timings.details.deserialize_us, + i64 + ), + ( + "changed_account_count", + self.execute_timings.details.changed_account_count, + i64 + ), + ( + "total_account_count", + self.execute_timings.details.total_account_count, + i64 + ), + ( + "total_data_size", + self.execute_timings.details.total_data_size, + i64 + ), + ( + "data_size_changed", + self.execute_timings.details.data_size_changed, + i64 + ), ); } } @@ -100,6 +140,7 @@ pub(crate) struct ForkProgress { pub(crate) propagated_stats: PropagatedStats, pub(crate) replay_stats: 
ReplaySlotStats, pub(crate) replay_progress: ConfirmationProgress, + pub(crate) duplicate_stats: DuplicateStats, // Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only // count new blocks replayed since last restart, which won't include // blocks already existing in the ledger/before snapshot at start, @@ -112,6 +153,7 @@ impl ForkProgress { pub fn new( last_entry: Hash, prev_leader_slot: Option, + duplicate_stats: DuplicateStats, validator_stake_info: Option, num_blocks_on_fork: u64, num_dropped_blocks_on_fork: u64, @@ -145,6 +187,7 @@ impl ForkProgress { fork_stats: ForkStats::default(), replay_stats: ReplaySlotStats::default(), replay_progress: ConfirmationProgress::new(last_entry), + duplicate_stats, num_blocks_on_fork, num_dropped_blocks_on_fork, propagated_stats: PropagatedStats { @@ -164,6 +207,7 @@ impl ForkProgress { my_pubkey: &Pubkey, voting_pubkey: &Pubkey, prev_leader_slot: Option, + duplicate_stats: DuplicateStats, num_blocks_on_fork: u64, num_dropped_blocks_on_fork: u64, ) -> Self { @@ -183,11 +227,20 @@ impl ForkProgress { Self::new( bank.last_blockhash(), prev_leader_slot, + duplicate_stats, validator_fork_info, num_blocks_on_fork, num_dropped_blocks_on_fork, ) } + + pub fn is_duplicate_confirmed(&self) -> bool { + self.duplicate_stats.is_duplicate_confirmed + } + + pub fn set_duplicate_confirmed(&mut self) { + self.duplicate_stats.set_duplicate_confirmed(); + } } #[derive(Debug, Clone, Default)] @@ -202,9 +255,11 @@ pub(crate) struct ForkStats { pub(crate) vote_threshold: bool, pub(crate) is_locked_out: bool, pub(crate) voted_stakes: VotedStakes, - pub(crate) confirmation_reported: bool, + pub(crate) is_supermajority_confirmed: bool, pub(crate) computed: bool, pub(crate) lockout_intervals: LockoutIntervals, + pub(crate) bank_hash: Option, + pub(crate) my_latest_landed_vote: Option, } #[derive(Clone, Default)] @@ -220,6 +275,38 @@ pub(crate) struct PropagatedStats { pub(crate) total_epoch_stake: u64, } +#[derive(Clone, Default)] 
+pub(crate) struct DuplicateStats { + latest_unconfirmed_duplicate_ancestor: Option, + is_duplicate_confirmed: bool, +} + +impl DuplicateStats { + pub fn new_with_unconfirmed_duplicate_ancestor( + latest_unconfirmed_duplicate_ancestor: Option, + ) -> Self { + Self { + latest_unconfirmed_duplicate_ancestor, + is_duplicate_confirmed: false, + } + } + + fn set_duplicate_confirmed(&mut self) { + self.is_duplicate_confirmed = true; + self.latest_unconfirmed_duplicate_ancestor = None; + } + + fn update_with_newly_confirmed_duplicate_ancestor(&mut self, newly_confirmed_ancestor: Slot) { + if let Some(latest_unconfirmed_duplicate_ancestor) = + self.latest_unconfirmed_duplicate_ancestor + { + if latest_unconfirmed_duplicate_ancestor <= newly_confirmed_ancestor { + self.latest_unconfirmed_duplicate_ancestor = None; + } + } + } +} + impl PropagatedStats { pub fn add_vote_pubkey(&mut self, vote_pubkey: Pubkey, stake: u64) { if self.propagated_validators.insert(vote_pubkey) { @@ -308,6 +395,18 @@ impl ProgressMap { .map(|fork_progress| &mut fork_progress.fork_stats) } + pub fn is_dead(&self, slot: Slot) -> Option { + self.progress_map + .get(&slot) + .map(|fork_progress| fork_progress.is_dead) + } + + pub fn get_hash(&self, slot: Slot) -> Option { + self.progress_map + .get(&slot) + .and_then(|fork_progress| fork_progress.fork_stats.bank_hash) + } + pub fn is_propagated(&self, slot: Slot) -> bool { let leader_slot_to_check = self.get_latest_leader_slot(slot); @@ -339,6 +438,125 @@ impl ProgressMap { } } + #[cfg(test)] + pub fn is_unconfirmed_duplicate(&self, slot: Slot) -> Option { + self.get(&slot).map(|p| { + p.duplicate_stats + .latest_unconfirmed_duplicate_ancestor + .map(|ancestor| ancestor == slot) + .unwrap_or(false) + }) + } + + pub fn latest_unconfirmed_duplicate_ancestor(&self, slot: Slot) -> Option { + self.get(&slot) + .map(|p| p.duplicate_stats.latest_unconfirmed_duplicate_ancestor) + .unwrap_or(None) + } + + pub fn set_unconfirmed_duplicate_slot(&mut self, slot: 
Slot, descendants: &HashSet) { + if let Some(fork_progress) = self.get_mut(&slot) { + if fork_progress.is_duplicate_confirmed() { + assert!(fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor + .is_none()); + return; + } + + if fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor + == Some(slot) + { + // Already been marked + return; + } + fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor = Some(slot); + + for d in descendants { + if let Some(fork_progress) = self.get_mut(&d) { + fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor = Some(std::cmp::max( + fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor + .unwrap_or(0), + slot, + )); + } + } + } + } + + pub fn set_confirmed_duplicate_slot( + &mut self, + slot: Slot, + ancestors: &HashSet, + descendants: &HashSet, + ) { + for a in ancestors { + if let Some(fork_progress) = self.get_mut(&a) { + fork_progress.set_duplicate_confirmed(); + } + } + + if let Some(slot_fork_progress) = self.get_mut(&slot) { + // Setting the fields here is nly correct and necessary if the loop above didn't + // already do this, so check with an assert. 
+ assert!(!ancestors.contains(&slot)); + let slot_had_unconfirmed_duplicate_ancestor = slot_fork_progress + .duplicate_stats + .latest_unconfirmed_duplicate_ancestor + .is_some(); + slot_fork_progress.set_duplicate_confirmed(); + + if slot_had_unconfirmed_duplicate_ancestor { + for d in descendants { + if let Some(descendant_fork_progress) = self.get_mut(&d) { + descendant_fork_progress + .duplicate_stats + .update_with_newly_confirmed_duplicate_ancestor(slot); + } + } + } else { + // Neither this slot `S`, nor earlier ancestors were marked as duplicate, + // so this means all descendants either: + // 1) Have no duplicate ancestors + // 2) Have a duplicate ancestor > `S` + + // In both cases, there's no need to iterate through descendants because + // this confirmation on `S` is irrelevant to them. + } + } + } + + pub fn my_latest_landed_vote(&self, slot: Slot) -> Option { + self.progress_map + .get(&slot) + .and_then(|s| s.fork_stats.my_latest_landed_vote) + } + + pub fn set_supermajority_confirmed_slot(&mut self, slot: Slot) { + let slot_progress = self.get_mut(&slot).unwrap(); + slot_progress.fork_stats.is_supermajority_confirmed = true; + } + + pub fn is_supermajority_confirmed(&self, slot: Slot) -> Option { + self.progress_map + .get(&slot) + .map(|s| s.fork_stats.is_supermajority_confirmed) + } + + pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option { + self.progress_map + .get(&slot) + .map(|s| s.is_duplicate_confirmed()) + } + pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option { let parent_slot = bank.parent_slot(); self.get_propagated_stats(parent_slot) @@ -381,6 +599,8 @@ impl ProgressMap { #[cfg(test)] mod test { use super::*; + use crate::consensus::test::VoteSimulator; + use trees::tr; #[test] fn test_add_vote_pubkey() { @@ -471,13 +691,21 @@ mod test { fn test_is_propagated_status_on_construction() { // If the given ValidatorStakeInfo == None, then this is not // a leader slot and is_propagated == false - let progress = 
ForkProgress::new(Hash::default(), Some(9), None, 0, 0); + let progress = ForkProgress::new( + Hash::default(), + Some(9), + DuplicateStats::default(), + None, + 0, + 0, + ); assert!(!progress.propagated_stats.is_propagated); // If the stake is zero, then threshold is always achieved let progress = ForkProgress::new( Hash::default(), Some(9), + DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake: 0, ..ValidatorStakeInfo::default() @@ -492,6 +720,7 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), + DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake: 2, ..ValidatorStakeInfo::default() @@ -505,6 +734,7 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), + DuplicateStats::default(), Some(ValidatorStakeInfo { stake: 1, total_epoch_stake: 2, @@ -521,6 +751,7 @@ mod test { let progress = ForkProgress::new( Hash::default(), Some(9), + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -534,12 +765,23 @@ mod test { // Insert new ForkProgress for slot 10 (not a leader slot) and its // previous leader slot 9 (leader slot) - progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0)); + progress_map.insert( + 10, + ForkProgress::new( + Hash::default(), + Some(9), + DuplicateStats::default(), + None, + 0, + 0, + ), + ); progress_map.insert( 9, ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -554,7 +796,17 @@ mod test { // The previous leader before 8, slot 7, does not exist in // progress map, so is_propagated(8) should return true as // this implies the parent is rooted - progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0)); + progress_map.insert( + 8, + ForkProgress::new( + Hash::default(), + Some(7), + DuplicateStats::default(), + None, + 0, + 0, + ), + ); assert!(progress_map.is_propagated(8)); // If we set the is_propagated = true, is_propagated should 
return true @@ -577,4 +829,157 @@ mod test { .is_leader_slot = true; assert!(!progress_map.is_propagated(10)); } + + fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot: Slot, + larger_duplicate_slot: Slot, + ) -> (ProgressMap, RwLock) { + // Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5 + let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5))))); + let mut vote_simulator = VoteSimulator::new(1); + vote_simulator.fill_bank_forks(forks, &HashMap::new()); + let VoteSimulator { + mut progress, + bank_forks, + .. + } = vote_simulator; + let descendants = bank_forks.read().unwrap().descendants().clone(); + + // Mark the slots as unconfirmed duplicates + progress.set_unconfirmed_duplicate_slot( + smaller_duplicate_slot, + &descendants.get(&smaller_duplicate_slot).unwrap(), + ); + progress.set_unconfirmed_duplicate_slot( + larger_duplicate_slot, + &descendants.get(&larger_duplicate_slot).unwrap(), + ); + + // Correctness checks + for slot in bank_forks.read().unwrap().banks().keys() { + if *slot < smaller_duplicate_slot { + assert!(progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .is_none()); + } else if *slot < larger_duplicate_slot { + assert_eq!( + progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .unwrap(), + smaller_duplicate_slot + ); + } else { + assert_eq!( + progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .unwrap(), + larger_duplicate_slot + ); + } + } + + (progress, bank_forks) + } + + #[test] + fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() { + let smaller_duplicate_slot = 1; + let larger_duplicate_slot = 4; + let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot, + larger_duplicate_slot, + ); + let descendants = bank_forks.read().unwrap().descendants().clone(); + let ancestors = bank_forks.read().unwrap().ancestors(); + + // Mark the smaller duplicate slot as confirmed + progress.set_confirmed_duplicate_slot( + 
smaller_duplicate_slot, + &ancestors.get(&smaller_duplicate_slot).unwrap(), + &descendants.get(&smaller_duplicate_slot).unwrap(), + ); + for slot in bank_forks.read().unwrap().banks().keys() { + if *slot < larger_duplicate_slot { + // Only slots <= smaller_duplicate_slot have been duplicate confirmed + if *slot <= smaller_duplicate_slot { + assert!(progress.is_duplicate_confirmed(*slot).unwrap()); + } else { + assert!(!progress.is_duplicate_confirmed(*slot).unwrap()); + } + // The unconfirmed duplicate flag has been cleared on the smaller + // descendants because their most recent duplicate ancestor has + // been confirmed + assert!(progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .is_none()); + } else { + assert!(!progress.is_duplicate_confirmed(*slot).unwrap(),); + // The unconfirmed duplicate flag has not been cleared on the smaller + // descendants because their most recent duplicate ancestor, + // `larger_duplicate_slot` has not yet been confirmed + assert_eq!( + progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .unwrap(), + larger_duplicate_slot + ); + } + } + + // Mark the larger duplicate slot as confirmed, all slots should no longer + // have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed + progress.set_confirmed_duplicate_slot( + larger_duplicate_slot, + &ancestors.get(&larger_duplicate_slot).unwrap(), + &descendants.get(&larger_duplicate_slot).unwrap(), + ); + for slot in bank_forks.read().unwrap().banks().keys() { + // All slots <= the latest duplicate confirmed slot are ancestors of + // that slot, so they should all be marked duplicate confirmed + assert_eq!( + progress.is_duplicate_confirmed(*slot).unwrap(), + *slot <= larger_duplicate_slot + ); + assert!(progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .is_none()); + } + } + + #[test] + fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() { + let smaller_duplicate_slot = 1; + let larger_duplicate_slot = 4; + let (mut progress, 
bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests( + smaller_duplicate_slot, + larger_duplicate_slot, + ); + let descendants = bank_forks.read().unwrap().descendants().clone(); + let ancestors = bank_forks.read().unwrap().ancestors(); + + // Mark the larger duplicate slot as confirmed + progress.set_confirmed_duplicate_slot( + larger_duplicate_slot, + &ancestors.get(&larger_duplicate_slot).unwrap(), + &descendants.get(&larger_duplicate_slot).unwrap(), + ); + + // All slots should no longer have any unconfirmed duplicate ancestors + progress.set_confirmed_duplicate_slot( + larger_duplicate_slot, + &ancestors.get(&larger_duplicate_slot).unwrap(), + &descendants.get(&larger_duplicate_slot).unwrap(), + ); + for slot in bank_forks.read().unwrap().banks().keys() { + // All slots <= the latest duplicate confirmed slot are ancestors of + // that slot, so they should all be marked duplicate confirmed + assert_eq!( + progress.is_duplicate_confirmed(*slot).unwrap(), + *slot <= larger_duplicate_slot + ); + assert!(progress + .latest_unconfirmed_duplicate_ancestor(*slot) + .is_none()); + } + } } diff --git a/core/src/repair_weight.rs b/core/src/repair_weight.rs index 12e7db38d5..26cce442e1 100644 --- a/core/src/repair_weight.rs +++ b/core/src/repair_weight.rs @@ -8,6 +8,7 @@ use solana_runtime::{contains::Contains, epoch_stakes::EpochStakes}; use solana_sdk::{ clock::Slot, epoch_schedule::{Epoch, EpochSchedule}, + hash::Hash, pubkey::Pubkey, }; use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; @@ -30,7 +31,7 @@ pub struct RepairWeight { impl RepairWeight { pub fn new(root: Slot) -> Self { - let root_tree = HeaviestSubtreeForkChoice::new(root); + let root_tree = HeaviestSubtreeForkChoice::new((root, Hash::default())); let slot_to_tree: HashMap = vec![(root, root)].into_iter().collect(); let trees: HashMap = vec![(root, root_tree)].into_iter().collect(); @@ -102,7 +103,12 @@ impl RepairWeight { new_ancestors.push_back(slot); if new_ancestors.len() 
> 1 { for i in 0..new_ancestors.len() - 1 { - tree.add_new_leaf_slot(new_ancestors[i + 1], Some(new_ancestors[i])); + // TODO: Repair right now does not distinguish between votes for different + // versions of the same slot. + tree.add_new_leaf_slot( + (new_ancestors[i + 1], Hash::default()), + Some((new_ancestors[i], Hash::default())), + ); self.slot_to_tree.insert(new_ancestors[i + 1], tree_root); } } @@ -122,7 +128,13 @@ impl RepairWeight { .get_mut(&tree_root) .expect("`slot_to_tree` and `self.trees` must be in sync"); let updates: Vec<_> = updates.into_iter().collect(); - tree.add_votes(&updates, epoch_stakes, epoch_schedule); + tree.add_votes( + updates + .iter() + .map(|(pubkey, slot)| (*pubkey, (*slot, Hash::default()))), + epoch_stakes, + epoch_schedule, + ); } } @@ -189,7 +201,9 @@ impl RepairWeight { .remove(&subtree_root) .expect("Must exist, was found in `self.trees` above"); self.remove_tree_slots( - subtree.all_slots_stake_voted_subtree().iter().map(|x| &x.0), + subtree + .all_slots_stake_voted_subtree() + .map(|((slot, _), _)| slot), new_root, ); } @@ -202,10 +216,16 @@ impl RepairWeight { // Find all descendants of `self.root` that are not reachable from `new_root`. // These are exactly the unrooted slots, which can be purged and added to // `self.unrooted_slots`. 
- let unrooted_slots = new_root_tree.subtree_diff(new_root_tree_root, new_root); - self.remove_tree_slots(unrooted_slots.iter(), new_root); + let unrooted_slots = new_root_tree.subtree_diff( + (new_root_tree_root, Hash::default()), + (new_root, Hash::default()), + ); + self.remove_tree_slots( + unrooted_slots.iter().map(|slot_hash| &slot_hash.0), + new_root, + ); - new_root_tree.set_root(new_root); + new_root_tree.set_root((new_root, Hash::default())); // Update `self.slot_to_tree` to reflect new root self.rename_tree_root(&new_root_tree, new_root); @@ -259,7 +279,7 @@ impl RepairWeight { .map(|(slot, tree)| { ( *slot, - tree.stake_voted_subtree(*slot) + tree.stake_voted_subtree(&(*slot, Hash::default())) .expect("Tree must have weight at its own root"), ) }) @@ -310,7 +330,7 @@ impl RepairWeight { // Attempts to chain the orphan subtree rooted at `orphan_tree_root` // to any earlier subtree with new any ancestry information in `blockstore`. - // Returns the earliest known ancestor of `heavest_tree_root`. + // Returns the earliest known ancestor of `heaviest_tree_root`. 
fn update_orphan_ancestors( &mut self, blockstore: &Blockstore, @@ -343,7 +363,7 @@ impl RepairWeight { for ancestor in new_ancestors.iter().skip(num_skip).rev() { self.slot_to_tree.insert(*ancestor, orphan_tree_root); - heaviest_tree.add_root_parent(*ancestor); + heaviest_tree.add_root_parent((*ancestor, Hash::default())); } } if let Some(parent_tree_root) = parent_tree_root { @@ -386,8 +406,7 @@ impl RepairWeight { self.remove_tree_slots( orphan_tree .all_slots_stake_voted_subtree() - .iter() - .map(|x| &x.0), + .map(|((slot, _), _)| slot), self.root, ); None @@ -403,8 +422,10 @@ impl RepairWeight { // Update `self.slot_to_tree` self.slot_to_tree.insert(new_tree_root, new_tree_root); - self.trees - .insert(new_tree_root, HeaviestSubtreeForkChoice::new(new_tree_root)); + self.trees.insert( + new_tree_root, + HeaviestSubtreeForkChoice::new((new_tree_root, Hash::default())), + ); } fn find_ancestor_subtree_of_slot( @@ -460,13 +481,18 @@ impl RepairWeight { .get_mut(&root2) .expect("tree to be merged into must exist"); - tree2.merge(tree1, merge_leaf, epoch_stakes, epoch_schedule); + tree2.merge( + tree1, + &(merge_leaf, Hash::default()), + epoch_stakes, + epoch_schedule, + ); } // Update all slots in the `tree1` to point to `root2`, fn rename_tree_root(&mut self, tree1: &HeaviestSubtreeForkChoice, root2: Slot) { let all_slots = tree1.all_slots_stake_voted_subtree(); - for (slot, _) in all_slots { + for ((slot, _), _) in all_slots { *self .slot_to_tree .get_mut(&slot) @@ -560,7 +586,14 @@ mod test { // repair_weight should contain one subtree 0->1 assert_eq!(repair_weight.trees.len(), 1); - assert_eq!(repair_weight.trees.get(&0).unwrap().ancestors(1), vec![0]); + assert_eq!( + repair_weight + .trees + .get(&0) + .unwrap() + .ancestors((1, Hash::default())), + vec![(0, Hash::default())] + ); for i in &[0, 1] { assert_eq!(*repair_weight.slot_to_tree.get(i).unwrap(), 0); } @@ -577,11 +610,25 @@ mod test { ); assert_eq!(repair_weight.trees.len(), 1); assert_eq!( - 
repair_weight.trees.get(&0).unwrap().ancestors(4), + repair_weight + .trees + .get(&0) + .unwrap() + .ancestors((4, Hash::default())) + .into_iter() + .map(|slot_hash| slot_hash.0) + .collect::>(), vec![2, 1, 0] ); assert_eq!( - repair_weight.trees.get(&0).unwrap().ancestors(6), + repair_weight + .trees + .get(&0) + .unwrap() + .ancestors((6, Hash::default())) + .into_iter() + .map(|slot_hash| slot_hash.0) + .collect::>(), vec![5, 3, 1, 0] ); for slot in 0..=6 { @@ -590,7 +637,7 @@ mod test { .trees .get(&0) .unwrap() - .stake_voted_at(slot) + .stake_voted_at(&(slot, Hash::default())) .unwrap(); if slot == 6 { assert_eq!(stake_voted_at, 3 * stake); @@ -604,7 +651,7 @@ mod test { .trees .get(&0) .unwrap() - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(); assert_eq!(stake_voted_subtree, 3 * stake); } @@ -613,7 +660,7 @@ mod test { .trees .get(&0) .unwrap() - .stake_voted_subtree(*slot) + .stake_voted_subtree(&(*slot, Hash::default())) .unwrap(); assert_eq!(stake_voted_subtree, 0); } @@ -637,8 +684,20 @@ mod test { // Should contain two trees, one for main fork, one for the orphan // branch assert_eq!(repair_weight.trees.len(), 2); - assert_eq!(repair_weight.trees.get(&0).unwrap().ancestors(1), vec![0]); - assert!(repair_weight.trees.get(&8).unwrap().ancestors(8).is_empty()); + assert_eq!( + repair_weight + .trees + .get(&0) + .unwrap() + .ancestors((1, Hash::default())), + vec![(0, Hash::default())] + ); + assert!(repair_weight + .trees + .get(&8) + .unwrap() + .ancestors((8, Hash::default())) + .is_empty()); let votes = vec![(1, vote_pubkeys.clone()), (10, vote_pubkeys.clone())]; let mut repair_weight = RepairWeight::new(0); @@ -652,8 +711,22 @@ mod test { // Should contain two trees, one for main fork, one for the orphan // branch assert_eq!(repair_weight.trees.len(), 2); - assert_eq!(repair_weight.trees.get(&0).unwrap().ancestors(1), vec![0]); - assert_eq!(repair_weight.trees.get(&8).unwrap().ancestors(10), vec![8]); + 
assert_eq!( + repair_weight + .trees + .get(&0) + .unwrap() + .ancestors((1, Hash::default())), + vec![(0, Hash::default())] + ); + assert_eq!( + repair_weight + .trees + .get(&8) + .unwrap() + .ancestors((10, Hash::default())), + vec![(8, Hash::default())] + ); // Connect orphan back to main fork blockstore.add_tree(tr(6) / (tr(8)), true, true, 2, Hash::default()); @@ -672,7 +745,14 @@ mod test { bank.epoch_schedule(), ); assert_eq!( - repair_weight.trees.get(&8).unwrap().ancestors(11), + repair_weight + .trees + .get(&8) + .unwrap() + .ancestors((11, Hash::default())) + .into_iter() + .map(|slot_hash| slot_hash.0) + .collect::>(), vec![10, 8] ); @@ -784,13 +864,13 @@ mod test { .trees .get(&8) .unwrap() - .stake_voted_subtree(8) + .stake_voted_subtree(&(8, Hash::default())) .unwrap(), repair_weight .trees .get(&20) .unwrap() - .stake_voted_subtree(20) + .stake_voted_subtree(&(20, Hash::default())) .unwrap() ); assert_eq!(repairs.len(), 1); @@ -929,11 +1009,11 @@ mod test { assert_eq!(*repair_weight.slot_to_tree.get(&2).unwrap(), 1); // Trees tracked should be updated - assert_eq!(repair_weight.trees.get(&1).unwrap().root(), 1); + assert_eq!(repair_weight.trees.get(&1).unwrap().root().0, 1); // Orphan slots should not be changed for orphan in &[8, 20] { - assert_eq!(repair_weight.trees.get(orphan).unwrap().root(), *orphan); + assert_eq!(repair_weight.trees.get(orphan).unwrap().root().0, *orphan); assert_eq!(repair_weight.slot_to_tree.get(orphan).unwrap(), orphan); } } @@ -957,7 +1037,7 @@ mod test { // Orphan slots should not be changed for orphan in &[8, 20] { - assert_eq!(repair_weight.trees.get(orphan).unwrap().root(), *orphan); + assert_eq!(repair_weight.trees.get(orphan).unwrap().root().0, *orphan); assert_eq!(repair_weight.slot_to_tree.get(orphan).unwrap(), orphan); } } @@ -979,7 +1059,7 @@ mod test { assert!(!repair_weight.slot_to_tree.contains_key(&8)); // Other higher orphan branch rooted at slot `20` remains unchanged - 
assert_eq!(repair_weight.trees.get(&20).unwrap().root(), 20); + assert_eq!(repair_weight.trees.get(&20).unwrap().root().0, 20); assert_eq!(*repair_weight.slot_to_tree.get(&20).unwrap(), 20); } @@ -1020,7 +1100,7 @@ mod test { // Orphan 20 should still exist assert_eq!(repair_weight.trees.len(), 2); - assert_eq!(repair_weight.trees.get(&20).unwrap().root(), 20); + assert_eq!(repair_weight.trees.get(&20).unwrap().root().0, 20); assert_eq!(*repair_weight.slot_to_tree.get(&20).unwrap(), 20); // Now set root at a slot 30 that doesnt exist in `repair_weight`, but is @@ -1191,7 +1271,7 @@ mod test { // Check orphans are present for orphan in &[8, 20] { - assert_eq!(repair_weight.trees.get(orphan).unwrap().root(), *orphan); + assert_eq!(repair_weight.trees.get(orphan).unwrap().root().0, *orphan); assert_eq!(repair_weight.slot_to_tree.get(orphan).unwrap(), orphan); } (blockstore, bank, repair_weight) @@ -1208,7 +1288,10 @@ mod test { assert!(!repair_weight.unrooted_slots.contains(&old_root)); // Validate new root - assert_eq!(repair_weight.trees.get(&new_root).unwrap().root(), new_root); + assert_eq!( + repair_weight.trees.get(&new_root).unwrap().root().0, + new_root + ); assert_eq!( *repair_weight.slot_to_tree.get(&new_root).unwrap(), new_root diff --git a/core/src/repair_weighted_traversal.rs b/core/src/repair_weighted_traversal.rs index 59371a7c3d..534ef4841d 100644 --- a/core/src/repair_weighted_traversal.rs +++ b/core/src/repair_weighted_traversal.rs @@ -4,7 +4,7 @@ use crate::{ }; use solana_ledger::blockstore::Blockstore; use solana_runtime::contains::Contains; -use solana_sdk::clock::Slot; +use solana_sdk::{clock::Slot, hash::Hash}; use std::collections::{HashMap, HashSet}; #[derive(Debug, PartialEq)] @@ -32,7 +32,7 @@ impl<'a> RepairWeightTraversal<'a> { pub fn new(tree: &'a HeaviestSubtreeForkChoice) -> Self { Self { tree, - pending: vec![Visit::Unvisited(tree.root())], + pending: vec![Visit::Unvisited(tree.root().0)], } } } @@ -48,16 +48,20 @@ impl<'a> Iterator 
for RepairWeightTraversal<'a> { self.pending.push(Visit::Visited(slot)); let mut children: Vec<_> = self .tree - .children(slot) + .children(&(slot, Hash::default())) .unwrap() .iter() - .map(|child_slot| Visit::Unvisited(*child_slot)) + .map(|(child_slot, _)| Visit::Unvisited(*child_slot)) .collect(); // Sort children by weight to prioritize visiting the heaviest // ones first - children - .sort_by(|slot1, slot2| self.tree.max_by_weight(slot1.slot(), slot2.slot())); + children.sort_by(|slot1, slot2| { + self.tree.max_by_weight( + (slot1.slot(), Hash::default()), + (slot2.slot(), Hash::default()), + ) + }); self.pending.extend(children); } next @@ -87,6 +91,9 @@ pub fn get_best_repair_shreds<'a>( .entry(next.slot()) .or_insert_with(|| blockstore.meta(next.slot()).unwrap()); + // May not exist if blockstore purged the SlotMeta due to something + // like duplicate slots. TODO: Account for duplicate slot may be in orphans, especially + // if earlier duplicate was already removed if let Some(slot_meta) = slot_meta { match next { Visit::Unvisited(slot) => { @@ -137,7 +144,7 @@ pub mod test { #[test] fn test_weighted_repair_traversal_single() { - let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(42); + let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((42, Hash::default())); let weighted_traversal = RepairWeightTraversal::new(&heaviest_subtree_fork_choice); let steps: Vec<_> = weighted_traversal.collect(); assert_eq!(steps, vec![Visit::Unvisited(42), Visit::Visited(42)]); @@ -174,7 +181,7 @@ pub mod test { // Add a vote to branch with slot 5, // should prioritize that branch heaviest_subtree_fork_choice.add_votes( - &[(vote_pubkeys[0], 5)], + [(vote_pubkeys[0], (5, Hash::default()))].iter(), bank.epoch_stakes_map(), bank.epoch_schedule(), ); @@ -227,8 +234,8 @@ pub mod test { // Add some leaves to blockstore, attached to the current best leaf, should prioritize // repairing those new leaves before trying other branches repairs = vec![]; - 
let best_overall_slot = heaviest_subtree_fork_choice.best_overall_slot(); - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), 4); + let best_overall_slot = heaviest_subtree_fork_choice.best_overall_slot().0; + assert_eq!(best_overall_slot, 4); blockstore.add_tree( tr(best_overall_slot) / (tr(6) / tr(7)), true, @@ -406,6 +413,7 @@ pub mod test { slot 4 | slot 5 */ + let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5)))); let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 860b53296d..8227aed1ab 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -2,22 +2,30 @@ use crate::{ broadcast_stage::RetransmitSlotsSender, - cache_block_time_service::CacheBlockTimeSender, + cache_block_meta_service::CacheBlockMetaSender, cluster_info::ClusterInfo, - cluster_info_vote_listener::VoteTracker, + cluster_info_vote_listener::{ + GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker, + }, + cluster_slot_state_verifier::*, cluster_slots::ClusterSlots, commitment_service::{AggregateCommitmentService, CommitmentAggregationData}, - consensus::{ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes}, + consensus::{ + ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes, SWITCH_FORK_THRESHOLD, + }, evm_services::EvmRecorderSender, fork_choice::{ForkChoice, SelectVoteAndResetForkResult}, heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice, + latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, - progress_map::{ForkProgress, ProgressMap, PropagatedStats}, + progress_map::{DuplicateStats, ForkProgress, ProgressMap, PropagatedStats}, repair_service::DuplicateSlotsResetReceiver, result::Result, 
rewards_recorder_service::RewardsRecorderSender, rpc_subscriptions::RpcSubscriptions, + unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, + window_service::DuplicateSlotReceiver, }; use solana_client::rpc_response::SlotUpdate; use solana_ledger::{ @@ -34,16 +42,18 @@ use solana_runtime::{ commitment::BlockCommitmentCache, vote_sender_types::ReplayVoteSender, }; use solana_sdk::{ - clock::{Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, + clock::{Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, + genesis_config::ClusterType, hash::Hash, pubkey::Pubkey, + signature::Signature, signature::{Keypair, Signer}, timing::timestamp, transaction::Transaction, }; -use solana_vote_program::{vote_instruction, vote_state::Vote}; +use solana_vote_program::vote_state::Vote; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, result, sync::{ atomic::{AtomicBool, Ordering}, @@ -51,12 +61,16 @@ use std::{ Arc, Mutex, RwLock, }, thread::{self, Builder, JoinHandle}, - time::Duration, + time::{Duration, Instant}, }; pub const MAX_ENTRY_RECV_PER_ITER: usize = 512; pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64; pub const MAX_UNCONFIRMED_SLOTS: usize = 5; +pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1; +pub const DUPLICATE_THRESHOLD: f64 = 1.0 - SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD; +const MAX_VOTE_SIGNATURES: usize = 200; +const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000; #[derive(PartialEq, Debug)] pub(crate) enum HeaviestForkFailures { @@ -85,6 +99,11 @@ impl Drop for Finalizer { } } +struct LastVoteRefreshTime { + last_refresh_time: Instant, + last_print_time: Instant, +} + #[derive(Default)] struct SkippedSlotsInfo { last_retransmit_slot: u64, @@ -94,7 +113,7 @@ struct SkippedSlotsInfo { pub struct ReplayStageConfig { pub my_pubkey: Pubkey, pub vote_account: Pubkey, - pub authorized_voter_keypairs: Vec>, + pub authorized_voter_keypairs: Arc>>>, pub exit: Arc, pub subscriptions: Arc, pub 
leader_schedule_cache: Arc, @@ -103,9 +122,10 @@ pub struct ReplayStageConfig { pub block_commitment_cache: Arc>, pub transaction_status_sender: Option, pub rewards_recorder_sender: Option, - pub cache_block_time_sender: Option, + pub cache_block_meta_sender: Option, pub evm_block_recorder_sender: Option, pub bank_notification_sender: Option, + pub wait_for_vote_to_start_leader: bool, } #[derive(Default)] @@ -121,10 +141,12 @@ pub struct ReplayTiming { compute_slot_stats_elapsed: u64, generate_new_bank_forks_elapsed: u64, replay_active_banks_elapsed: u64, - reset_duplicate_slots_elapsed: u64, wait_receive_elapsed: u64, heaviest_fork_failures_elapsed: u64, bank_count: u64, + process_gossip_duplicate_confirmed_slots_elapsed: u64, + process_duplicate_slots_elapsed: u64, + process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, } impl ReplayTiming { #[allow(clippy::too_many_arguments)] @@ -140,10 +162,12 @@ impl ReplayTiming { compute_slot_stats_elapsed: u64, generate_new_bank_forks_elapsed: u64, replay_active_banks_elapsed: u64, - reset_duplicate_slots_elapsed: u64, wait_receive_elapsed: u64, heaviest_fork_failures_elapsed: u64, bank_count: u64, + process_gossip_duplicate_confirmed_slots_elapsed: u64, + process_unfrozen_gossip_verified_vote_hashes_elapsed: u64, + process_duplicate_slots_elapsed: u64, ) { self.collect_frozen_banks_elapsed += collect_frozen_banks_elapsed; self.compute_bank_stats_elapsed += compute_bank_stats_elapsed; @@ -155,10 +179,14 @@ impl ReplayTiming { self.compute_slot_stats_elapsed += compute_slot_stats_elapsed; self.generate_new_bank_forks_elapsed += generate_new_bank_forks_elapsed; self.replay_active_banks_elapsed += replay_active_banks_elapsed; - self.reset_duplicate_slots_elapsed += reset_duplicate_slots_elapsed; self.wait_receive_elapsed += wait_receive_elapsed; self.heaviest_fork_failures_elapsed += heaviest_fork_failures_elapsed; self.bank_count += bank_count; + self.process_gossip_duplicate_confirmed_slots_elapsed += + 
process_gossip_duplicate_confirmed_slots_elapsed; + self.process_unfrozen_gossip_verified_vote_hashes_elapsed += + process_unfrozen_gossip_verified_vote_hashes_elapsed; + self.process_duplicate_slots_elapsed += process_duplicate_slots_elapsed; let now = timestamp(); let elapsed_ms = now - self.last_print; if elapsed_ms > 1000 { @@ -208,8 +236,13 @@ impl ReplayTiming { i64 ), ( - "reset_duplicate_slots_elapsed", - self.reset_duplicate_slots_elapsed as i64, + "process_gossip_duplicate_confirmed_slots_elapsed", + self.process_gossip_duplicate_confirmed_slots_elapsed as i64, + i64 + ), + ( + "process_unfrozen_gossip_verified_vote_hashes_elapsed", + self.process_unfrozen_gossip_verified_vote_hashes_elapsed as i64, i64 ), ( @@ -223,6 +256,11 @@ impl ReplayTiming { i64 ), ("bank_count", self.bank_count as i64, i64), + ( + "process_duplicate_slots_elapsed", + self.process_duplicate_slots_elapsed as i64, + i64 + ) ); *self = ReplayTiming::default(); @@ -244,13 +282,16 @@ impl ReplayStage { bank_forks: Arc>, cluster_info: Arc, ledger_signal_receiver: Receiver, + duplicate_slots_receiver: DuplicateSlotReceiver, poh_recorder: Arc>, mut tower: Tower, vote_tracker: Arc, cluster_slots: Arc, retransmit_slots_sender: RetransmitSlotsSender, - duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver, + _duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver, replay_vote_sender: ReplayVoteSender, + gossip_duplicate_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, + gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver, ) -> Self { let ReplayStageConfig { my_pubkey, @@ -264,9 +305,10 @@ impl ReplayStage { block_commitment_cache, transaction_status_sender, rewards_recorder_sender, - cache_block_time_sender, + cache_block_meta_sender, evm_block_recorder_sender, bank_notification_sender, + wait_for_vote_to_start_leader, } = config; trace!("replay stage"); @@ -296,6 +338,16 @@ impl ReplayStage { let mut partition_exists = false; let mut 
skipped_slots_info = SkippedSlotsInfo::default(); let mut replay_timing = ReplayTiming::default(); + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); + let mut latest_validator_votes_for_frozen_banks = LatestValidatorVotesForFrozenBanks::default(); + let mut voted_signatures = Vec::new(); + let mut has_new_vote_been_rooted = !wait_for_vote_to_start_leader; + let mut last_vote_refresh_time = LastVoteRefreshTime { + last_refresh_time: Instant::now(), + last_print_time: Instant::now(), + }; loop { let allocated = thread_mem_usage::Allocatedp::default(); @@ -322,13 +374,16 @@ impl ReplayStage { let start = allocated.get(); let mut replay_active_banks_time = Measure::start("replay_active_banks_time"); + let ancestors = bank_forks.read().unwrap().ancestors(); + let descendants = bank_forks.read().unwrap().descendants().clone(); let did_complete_bank = Self::replay_active_banks( &blockstore, &bank_forks, &my_pubkey, &vote_account, &mut progress, - transaction_status_sender.clone(), + transaction_status_sender.as_ref(), + cache_block_meta_sender.as_ref(), &verify_recyclers, &mut heaviest_subtree_fork_choice, &replay_vote_sender, @@ -336,19 +391,23 @@ impl ReplayStage { &rewards_recorder_sender, &subscriptions, &evm_block_recorder_sender, + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut unfrozen_gossip_verified_vote_hashes, + &mut latest_validator_votes_for_frozen_banks, ); replay_active_banks_time.stop(); Self::report_memory(&allocated, "replay_active_banks", start); - let mut reset_duplicate_slots_time = Measure::start("reset_duplicate_slots"); - let mut ancestors = bank_forks.read().unwrap().ancestors(); - let mut descendants = bank_forks.read().unwrap().descendants().clone(); let forks_root = 
bank_forks.read().unwrap().root(); let start = allocated.get(); // Reset any duplicate slots that have been confirmed // by the network in anticipation of the confirmed version of // the slot + /*let mut reset_duplicate_slots_time = Measure::start("reset_duplicate_slots"); Self::reset_duplicate_slots( &duplicate_slots_reset_receiver, &mut ancestors, @@ -356,7 +415,52 @@ impl ReplayStage { &mut progress, &bank_forks, ); - reset_duplicate_slots_time.stop(); + reset_duplicate_slots_time.stop();*/ + + // Check for any newly confirmed slots detected from gossip. + let mut process_gossip_duplicate_confirmed_slots_time = Measure::start("process_gossip_duplicate_confirmed_slots"); + Self::process_gossip_duplicate_confirmed_slots( + &gossip_duplicate_confirmed_slots_receiver, + &mut duplicate_slots_tracker, + &mut gossip_duplicate_confirmed_slots, + &bank_forks, + &mut progress, + &mut heaviest_subtree_fork_choice, + &ancestors, + &descendants, + ); + process_gossip_duplicate_confirmed_slots_time.stop(); + + + // Ingest any new verified votes from gossip. Important for fork choice + // and switching proofs because these may be votes that haven't yet been + // included in a block, so we may not have yet observed these votes just + // by replaying blocks. 
+ let mut process_unfrozen_gossip_verified_vote_hashes_time = Measure::start("process_gossip_duplicate_confirmed_slots"); + Self::process_gossip_verified_vote_hashes( + &gossip_verified_vote_hash_receiver, + &mut unfrozen_gossip_verified_vote_hashes, + &heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, + ); + for _ in gossip_verified_vote_hash_receiver.try_iter() {} + process_unfrozen_gossip_verified_vote_hashes_time.stop(); + + // Check to remove any duplicated slots from fork choice + let mut process_duplicate_slots_time = Measure::start("process_duplicate_slots"); + if !tpu_has_bank { + Self::process_duplicate_slots( + &duplicate_slots_receiver, + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &bank_forks, + &ancestors, + &descendants, + &mut progress, + &mut heaviest_subtree_fork_choice, + ); + } + process_duplicate_slots_time.stop(); let mut collect_frozen_banks_time = Measure::start("frozen_banks"); let mut frozen_banks: Vec<_> = bank_forks @@ -371,7 +475,7 @@ impl ReplayStage { let mut compute_bank_stats_time = Measure::start("compute_bank_stats"); let newly_computed_slot_stats = Self::compute_bank_stats( - &my_pubkey, + &vote_account, &ancestors, &mut frozen_banks, &tower, @@ -380,6 +484,7 @@ impl ReplayStage { &cluster_slots, &bank_forks, &mut heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); compute_bank_stats_time.stop(); @@ -394,25 +499,25 @@ impl ReplayStage { &bank_forks, ); - for slot in confirmed_forks { - progress - .get_mut(&slot) - .unwrap() - .fork_stats - .confirmation_reported = true; - } + Self::mark_slots_confirmed(&confirmed_forks, &bank_forks, &mut progress, + &mut duplicate_slots_tracker, + &ancestors, &descendants, &mut + heaviest_subtree_fork_choice); } compute_slot_stats_time.stop(); let mut select_forks_time = Measure::start("select_forks_time"); - let fork_choice: &mut dyn ForkChoice = - &mut heaviest_subtree_fork_choice; - let (heaviest_bank, 
heaviest_bank_on_same_voted_fork) = fork_choice + let (heaviest_bank, heaviest_bank_on_same_voted_fork) = heaviest_subtree_fork_choice .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); select_forks_time.stop(); - Self::report_memory(&allocated, "select_fork", start); + if let Some(heaviest_bank_on_same_voted_fork) = heaviest_bank_on_same_voted_fork.as_ref() { + if let Some(my_latest_landed_vote) = progress.my_latest_landed_vote(heaviest_bank_on_same_voted_fork.slot()) { + Self::refresh_last_vote(&mut tower, &cluster_info, heaviest_bank_on_same_voted_fork, &poh_recorder, my_latest_landed_vote, &vote_account, &authorized_voter_keypairs.read().unwrap(), &mut voted_signatures, has_new_vote_been_rooted, &mut last_vote_refresh_time); + } + } + let mut select_vote_and_reset_forks_time = Measure::start("select_vote_and_reset_forks"); let SelectVoteAndResetForkResult { @@ -421,7 +526,7 @@ impl ReplayStage { heaviest_fork_failures, } = Self::select_vote_and_reset_forks( &heaviest_bank, - &heaviest_bank_on_same_voted_fork, + heaviest_bank_on_same_voted_fork.as_ref(), &ancestors, &descendants, &progress, @@ -467,12 +572,13 @@ impl ReplayStage { Self::handle_votable_bank( &vote_bank, + &poh_recorder, switch_fork_decision, &bank_forks, &mut tower, &mut progress, &vote_account, - &authorized_voter_keypairs, + &authorized_voter_keypairs.read().unwrap(), &cluster_info, &blockstore, &leader_schedule_cache, @@ -482,8 +588,12 @@ impl ReplayStage { &subscriptions, &block_commitment_cache, &mut heaviest_subtree_fork_choice, - &cache_block_time_sender, &bank_notification_sender, + &mut duplicate_slots_tracker, + &mut gossip_duplicate_confirmed_slots, + &mut unfrozen_gossip_verified_vote_hashes, + &mut voted_signatures, + &mut has_new_vote_been_rooted, ); }; voting_time.stop(); @@ -575,6 +685,7 @@ impl ReplayStage { &progress, &retransmit_slots_sender, &mut skipped_slots_info, + has_new_vote_been_rooted, ); let poh_bank = poh_recorder.lock().unwrap().bank(); @@ 
-615,10 +726,12 @@ impl ReplayStage { compute_slot_stats_time.as_us(), generate_new_bank_forks_time.as_us(), replay_active_banks_time.as_us(), - reset_duplicate_slots_time.as_us(), wait_receive_time.as_us(), heaviest_fork_failures_time.as_us(), if did_complete_bank {1} else {0}, + process_gossip_duplicate_confirmed_slots_time.as_us(), + process_unfrozen_gossip_verified_vote_hashes_time.as_us(), + process_duplicate_slots_time.as_us(), ); } Ok(()) @@ -677,6 +790,9 @@ impl ReplayStage { // Initialize progress map with any root banks for bank in &frozen_banks { let prev_leader_slot = progress.get_bank_prev_leader_slot(bank); + let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor( + progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()), + ); progress.insert( bank.slot(), ForkProgress::new_from_bank( @@ -684,14 +800,17 @@ impl ReplayStage { &my_pubkey, &vote_account, prev_leader_slot, + duplicate_stats, 0, 0, ), ); } let root = root_bank.slot(); - let heaviest_subtree_fork_choice = - HeaviestSubtreeForkChoice::new_from_frozen_banks(root, &frozen_banks); + let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_frozen_banks( + (root, root_bank.hash()), + &frozen_banks, + ); (progress, heaviest_subtree_fork_choice) } @@ -707,6 +826,7 @@ impl ReplayStage { ); } + #[allow(dead_code)] fn reset_duplicate_slots( duplicate_slots_reset_receiver: &DuplicateSlotsResetReceiver, ancestors: &mut HashMap>, @@ -725,6 +845,7 @@ impl ReplayStage { } } + #[allow(dead_code)] fn purge_unconfirmed_duplicate_slot( duplicate_slot: Slot, ancestors: &mut HashMap>, @@ -810,6 +931,112 @@ impl ReplayStage { .expect("must exist based on earlier check"); } + // Check for any newly confirmed slots by the cluster. This is only detects + // optimistic and in the future, duplicate slot confirmations on the exact + // single slots and does not account for votes on their descendants. Used solely + // for duplicate slot recovery. 
+ fn process_gossip_duplicate_confirmed_slots( + gossip_duplicate_confirmed_slots_receiver: &GossipDuplicateConfirmedSlotsReceiver, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + bank_forks: &RwLock, + progress: &mut ProgressMap, + fork_choice: &mut HeaviestSubtreeForkChoice, + ancestors: &HashMap>, + descendants: &HashMap>, + ) { + let root = bank_forks.read().unwrap().root(); + for new_confirmed_slots in gossip_duplicate_confirmed_slots_receiver.try_iter() { + for (confirmed_slot, confirmed_hash) in new_confirmed_slots { + if confirmed_slot <= root { + continue; + } else if let Some(prev_hash) = + gossip_duplicate_confirmed_slots.insert(confirmed_slot, confirmed_hash) + { + assert_eq!(prev_hash, confirmed_hash); + // Already processed this signal + return; + } + + check_slot_agrees_with_cluster( + confirmed_slot, + root, + bank_forks + .read() + .unwrap() + .get(confirmed_slot) + .map(|b| b.hash()), + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + ancestors, + descendants, + progress, + fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + } + } + } + + fn process_gossip_verified_vote_hashes( + gossip_verified_vote_hash_receiver: &GossipVerifiedVoteHashReceiver, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + ) { + for (pubkey, slot, hash) in gossip_verified_vote_hash_receiver.try_iter() { + let is_frozen = heaviest_subtree_fork_choice.contains_block(&(slot, hash)); + // cluster_info_vote_listener will ensure it doesn't push duplicates + unfrozen_gossip_verified_vote_hashes.add_vote( + pubkey, + slot, + hash, + is_frozen, + latest_validator_votes_for_frozen_banks, + ) + } + } + + // Checks for and handle forks with duplicate slots. 
+ fn process_duplicate_slots( + duplicate_slots_receiver: &DuplicateSlotReceiver, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + bank_forks: &RwLock, + ancestors: &HashMap>, + descendants: &HashMap>, + progress: &mut ProgressMap, + fork_choice: &mut HeaviestSubtreeForkChoice, + ) { + let new_duplicate_slots: Vec = duplicate_slots_receiver.try_iter().collect(); + let (root_slot, bank_hashes) = { + let r_bank_forks = bank_forks.read().unwrap(); + let bank_hashes: Vec> = new_duplicate_slots + .iter() + .map(|duplicate_slot| r_bank_forks.get(*duplicate_slot).map(|bank| bank.hash())) + .collect(); + + (r_bank_forks.root(), bank_hashes) + }; + for (duplicate_slot, bank_hash) in + new_duplicate_slots.into_iter().zip(bank_hashes.into_iter()) + { + // WindowService should only send the signal once per slot + check_slot_agrees_with_cluster( + duplicate_slot, + root_slot, + bank_hash, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + ancestors, + descendants, + progress, + fork_choice, + SlotStateUpdate::Duplicate, + ); + } + } + fn log_leader_change( my_pubkey: &Pubkey, bank_slot: Slot, @@ -890,6 +1117,7 @@ impl ReplayStage { progress_map: &ProgressMap, retransmit_slots_sender: &RetransmitSlotsSender, skipped_slots_info: &mut SkippedSlotsInfo, + has_new_vote_been_rooted: bool, ) { // all the individual calls to poh_recorder.lock() are designed to // increase granularity, decrease contention @@ -926,6 +1154,11 @@ impl ReplayStage { ); if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) { + if !has_new_vote_been_rooted { + info!("Haven't landed a vote, so skipping my leader slot"); + return; + } + trace!( "{} leader {} at poh slot: {}", my_pubkey, @@ -999,7 +1232,7 @@ impl ReplayStage { bank: &Arc, blockstore: &Blockstore, bank_progress: &mut ForkProgress, - transaction_status_sender: Option, + transaction_status_sender: 
Option<&TransactionStatusSender>, replay_vote_sender: &ReplayVoteSender, verify_recyclers: &VerifyRecyclers, subscriptions: &Arc, @@ -1019,48 +1252,41 @@ impl ReplayStage { ); let tx_count_after = bank_progress.replay_progress.num_txs; let tx_count = tx_count_after - tx_count_before; - confirm_result.map_err(|err| { - // LedgerCleanupService should not be cleaning up anything - // that comes after the root, so we should not see any - // errors related to the slot being purged - let slot = bank.slot(); - - // Block producer can abandon the block if it detects a better one - // while producing. Somewhat common and expected in a - // network with variable network/machine configuration. - let is_serious = !matches!( - err, - BlockstoreProcessorError::InvalidBlock(BlockError::TooFewTicks) - ); - if is_serious { - warn!("Fatal replay error in slot: {}, err: {:?}", slot, err); - } else { - info!("Slot had too few ticks: {}", slot); - } - Self::mark_dead_slot( - blockstore, - bank_progress, - slot, - &err, - is_serious, - subscriptions, - ); - + // All errors must lead to marking the slot as dead, otherwise, + // the `check_slot_agrees_with_cluster()` called by `replay_active_banks()` + // will break! err })?; Ok(tx_count) } + #[allow(clippy::too_many_arguments)] fn mark_dead_slot( blockstore: &Blockstore, - bank_progress: &mut ForkProgress, - slot: Slot, + bank: &Bank, + root: Slot, err: &BlockstoreProcessorError, - is_serious: bool, subscriptions: &Arc, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + ancestors: &HashMap>, + descendants: &HashMap>, + progress: &mut ProgressMap, + heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, ) { + // Do not remove from progress map when marking dead! Needed by + // `process_gossip_duplicate_confirmed_slots()` + + // Block producer can abandon the block if it detects a better one + // while producing. 
Somewhat common and expected in a + // network with variable network/machine configuration. + let is_serious = !matches!( + err, + BlockstoreProcessorError::InvalidBlock(BlockError::TooFewTicks) + ); + let slot = bank.slot(); if is_serious { datapoint_error!( "replay-stage-mark_dead_slot", @@ -1074,7 +1300,7 @@ impl ReplayStage { ("slot", slot, i64) ); } - bank_progress.is_dead = true; + progress.get_mut(&slot).unwrap().is_dead = true; blockstore .set_dead_slot(slot) .expect("Failed to mark slot as dead in blockstore"); @@ -1083,11 +1309,24 @@ impl ReplayStage { err: format!("error: {:?}", err), timestamp: timestamp(), }); + check_slot_agrees_with_cluster( + slot, + root, + Some(bank.hash()), + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + ancestors, + descendants, + progress, + heaviest_subtree_fork_choice, + SlotStateUpdate::Dead, + ); } #[allow(clippy::too_many_arguments)] fn handle_votable_bank( bank: &Arc, + poh_recorder: &Arc>, switch_fork_decision: &SwitchForkDecision, bank_forks: &Arc>, tower: &mut Tower, @@ -1103,16 +1342,18 @@ impl ReplayStage { subscriptions: &Arc, block_commitment_cache: &Arc>, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, - cache_block_time_sender: &Option, bank_notification_sender: &Option, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + vote_signatures: &mut Vec, + has_new_vote_been_rooted: &mut bool, ) { if bank.is_empty() { inc_new_counter_info!("replay_stage-voted_empty_bank", 1); } trace!("handle votable bank {}", bank.slot()); - let (vote, tower_slots) = tower.new_vote_from_bank(bank, vote_account_pubkey); - let new_root = tower.record_bank_vote(vote); - let last_vote = tower.last_vote_and_timestamp(); + let new_root = tower.record_bank_vote(bank, vote_account_pubkey); if let Err(err) = tower.save(&cluster_info.keypair) { error!("Unable to 
save tower: {:?}", err); @@ -1138,12 +1379,6 @@ impl ReplayStage { blockstore .set_roots(&rooted_slots) .expect("Ledger set roots failed"); - Self::cache_block_times( - blockstore, - bank_forks, - &rooted_slots, - cache_block_time_sender, - ); let highest_confirmed_root = Some( block_commitment_cache .read() @@ -1157,6 +1392,11 @@ impl ReplayStage { accounts_background_request_sender, highest_confirmed_root, heaviest_subtree_fork_choice, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + unfrozen_gossip_verified_vote_hashes, + has_new_vote_been_rooted, + vote_signatures, ); subscriptions.notify_roots(rooted_slots); if let Some(sender) = bank_notification_sender { @@ -1181,25 +1421,28 @@ impl ReplayStage { Self::push_vote( cluster_info, bank, + poh_recorder, vote_account_pubkey, authorized_voter_keypairs, - last_vote, - &tower_slots, + tower, switch_fork_decision, + vote_signatures, + *has_new_vote_been_rooted, ); } - fn push_vote( - cluster_info: &ClusterInfo, - bank: &Arc, + fn generate_vote_tx( + node_keypair: &Arc, + bank: &Bank, vote_account_pubkey: &Pubkey, authorized_voter_keypairs: &[Arc], vote: Vote, - tower: &[Slot], switch_fork_decision: &SwitchForkDecision, - ) { + vote_signatures: &mut Vec, + has_new_vote_been_rooted: bool, + ) -> Option { if authorized_voter_keypairs.is_empty() { - return; + return None; } let vote_account = match bank.get_vote_account(vote_account_pubkey) { None => { @@ -1207,7 +1450,7 @@ impl ReplayStage { "Vote account {} does not exist. Unable to vote", vote_account_pubkey, ); - return; + return None; } Some((_stake, vote_account)) => vote_account, }; @@ -1218,7 +1461,7 @@ impl ReplayStage { "Vote account {} is unreadable. 
Unable to vote", vote_account_pubkey, ); - return; + return None; } Ok(vote_state) => vote_state, }; @@ -1231,7 +1474,7 @@ impl ReplayStage { vote_account_pubkey, bank.epoch() ); - return; + return None; }; let authorized_voter_keypair = match authorized_voter_keypairs @@ -1241,36 +1484,139 @@ impl ReplayStage { None => { warn!("The authorized keypair {} for vote account {} is not available. Unable to vote", authorized_voter_pubkey, vote_account_pubkey); - return; + return None; } Some(authorized_voter_keypair) => authorized_voter_keypair, }; - let node_keypair = cluster_info.keypair.clone(); // Send our last few votes along with the new one - let vote_ix = if bank.unlock_switch_vote() { - switch_fork_decision - .to_vote_instruction( - vote, - &vote_account_pubkey, - &authorized_voter_keypair.pubkey(), - ) - .expect("Switch threshold failure should not lead to voting") - } else { - vote_instruction::vote( + let vote_ix = switch_fork_decision + .to_vote_instruction( + vote, &vote_account_pubkey, &authorized_voter_keypair.pubkey(), - vote, ) - }; + .expect("Switch threshold failure should not lead to voting"); let mut vote_tx = Transaction::new_with_payer(&[vote_ix], Some(&node_keypair.pubkey())); let blockhash = bank.last_blockhash(); vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash); vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash); - let _ = cluster_info.send_vote(&vote_tx); - cluster_info.push_vote(tower, vote_tx); + + if !has_new_vote_been_rooted { + vote_signatures.push(vote_tx.signatures[0]); + if vote_signatures.len() > MAX_VOTE_SIGNATURES { + vote_signatures.remove(0); + } + } else { + vote_signatures.clear(); + } + + Some(vote_tx) + } + + #[allow(clippy::too_many_arguments)] + fn refresh_last_vote( + tower: &mut Tower, + cluster_info: &ClusterInfo, + heaviest_bank_on_same_fork: &Bank, + poh_recorder: &Mutex, + my_latest_landed_vote: Slot, + vote_account_pubkey: &Pubkey, + authorized_voter_keypairs: &[Arc], + vote_signatures: 
&mut Vec, + has_new_vote_been_rooted: bool, + last_vote_refresh_time: &mut LastVoteRefreshTime, + ) { + let last_voted_slot = tower.last_voted_slot(); + if last_voted_slot.is_none() { + return; + } + + // Refresh the vote if our latest vote hasn't landed, and the recent blockhash of the + // last attempt at a vote transaction has expired + let last_voted_slot = last_voted_slot.unwrap(); + if my_latest_landed_vote > last_voted_slot + && last_vote_refresh_time.last_print_time.elapsed().as_secs() >= 1 + { + last_vote_refresh_time.last_print_time = Instant::now(); + info!("Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower", my_latest_landed_vote, heaviest_bank_on_same_fork.slot(), last_voted_slot); + } + if my_latest_landed_vote >= last_voted_slot + || heaviest_bank_on_same_fork + .check_hash_age(&tower.last_vote_tx_blockhash(), MAX_PROCESSING_AGE) + .unwrap_or(false) + // In order to avoid voting on multiple forks all past MAX_PROCESSING_AGE that don't + // include the last voted blockhash + || last_vote_refresh_time.last_refresh_time.elapsed().as_millis() < MAX_VOTE_REFRESH_INTERVAL_MILLIS as u128 + { + return; + } + + // TODO: check the timestamp in this vote is correct, i.e. it shouldn't + // have changed from the original timestamp of the vote. 
+ let vote_tx = Self::generate_vote_tx( + &cluster_info.keypair, + heaviest_bank_on_same_fork, + vote_account_pubkey, + authorized_voter_keypairs, + tower.last_vote(), + &SwitchForkDecision::SameFork, + vote_signatures, + has_new_vote_been_rooted, + ); + + if let Some(vote_tx) = vote_tx { + let recent_blockhash = vote_tx.message.recent_blockhash; + tower.refresh_last_vote_tx_blockhash(recent_blockhash); + + // Send the votes to the TPU and gossip for network propagation + let hash_string = format!("{}", recent_blockhash); + datapoint_info!( + "refresh_vote", + ("last_voted_slot", last_voted_slot, i64), + ("target_bank_slot", heaviest_bank_on_same_fork.slot(), i64), + ("target_bank_hash", hash_string, String), + ); + let _ = cluster_info.send_vote( + &vote_tx, + crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder), + ); + cluster_info.refresh_vote(vote_tx, last_voted_slot); + last_vote_refresh_time.last_refresh_time = Instant::now(); + } + } + + fn push_vote( + cluster_info: &ClusterInfo, + bank: &Bank, + poh_recorder: &Mutex, + vote_account_pubkey: &Pubkey, + authorized_voter_keypairs: &[Arc], + tower: &mut Tower, + switch_fork_decision: &SwitchForkDecision, + vote_signatures: &mut Vec, + has_new_vote_been_rooted: bool, + ) { + let vote_tx = Self::generate_vote_tx( + &cluster_info.keypair, + bank, + vote_account_pubkey, + authorized_voter_keypairs, + tower.last_vote(), + switch_fork_decision, + vote_signatures, + has_new_vote_been_rooted, + ); + if let Some(vote_tx) = vote_tx { + tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash); + let _ = cluster_info.send_vote( + &vote_tx, + crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder), + ); + cluster_info.push_vote(&tower.tower_slots(), vote_tx); + } } fn update_commitment_cache( @@ -1290,8 +1636,8 @@ impl ReplayStage { my_pubkey: &Pubkey, blockstore: &Blockstore, bank: &Arc, - poh_recorder: &Arc>, - leader_schedule_cache: &Arc, + poh_recorder: &Mutex, + 
leader_schedule_cache: &LeaderScheduleCache, ) { let next_leader_slot = leader_schedule_cache.next_leader_slot( &my_pubkey, @@ -1322,12 +1668,13 @@ impl ReplayStage { #[allow(clippy::too_many_arguments)] fn replay_active_banks( - blockstore: &Arc, - bank_forks: &Arc>, + blockstore: &Blockstore, + bank_forks: &RwLock, my_pubkey: &Pubkey, vote_account: &Pubkey, progress: &mut ProgressMap, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, verify_recyclers: &VerifyRecyclers, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, replay_vote_sender: &ReplayVoteSender, @@ -1335,6 +1682,12 @@ impl ReplayStage { rewards_recorder_sender: &Option, subscriptions: &Arc, evm_block_recorder_sender: &Option, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots, + ancestors: &HashMap>, + descendants: &HashMap>, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, ) -> bool { let mut did_complete_bank = false; let mut tx_count = 0; @@ -1361,6 +1714,12 @@ impl ReplayStage { stats.num_dropped_blocks_on_fork + new_dropped_blocks; (num_blocks_on_fork, num_dropped_blocks_on_fork) }; + + // New children adopt the same latest duplicate ancestor as their parent. 
+ let duplicate_stats = DuplicateStats::new_with_unconfirmed_duplicate_ancestor( + progress.latest_unconfirmed_duplicate_ancestor(bank.parent_slot()), + ); + // Insert a progress entry even for slots this node is the leader for, so that // 1) confirm_forks can report confirmation, 2) we can cache computations about // this bank in `select_forks()` @@ -1370,16 +1729,18 @@ impl ReplayStage { &my_pubkey, vote_account, prev_leader_slot, + duplicate_stats, num_blocks_on_fork, num_dropped_blocks_on_fork, ) }); if bank.collector_id() != my_pubkey { + let root_slot = bank_forks.read().unwrap().root(); let replay_result = Self::replay_blockstore_into_bank( &bank, &blockstore, bank_progress, - transaction_status_sender.clone(), + transaction_status_sender, replay_vote_sender, verify_recyclers, subscriptions, @@ -1387,7 +1748,20 @@ impl ReplayStage { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { - trace!("replay_result err: {:?}, slot {}", err, bank_slot); + // Error means the slot needs to be marked as dead + Self::mark_dead_slot( + blockstore, + &bank, + root_slot, + &err, + subscriptions, + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + ancestors, + descendants, + progress, + heaviest_subtree_fork_choice, + ); // If the bank was corrupted, don't try to run the below logic to check if the // bank is completed continue; @@ -1396,45 +1770,57 @@ impl ReplayStage { } assert_eq!(*bank_slot, bank.slot()); if bank.is_complete() { - if !blockstore.has_duplicate_shreds_in_slot(bank.slot()) { - bank_progress.replay_stats.report_stats( - bank.slot(), - bank_progress.replay_progress.num_entries, - bank_progress.replay_progress.num_shreds, - ); - did_complete_bank = true; - info!("bank frozen: {}", bank.slot()); - if let Some(transaction_status_sender) = transaction_status_sender.clone() { - transaction_status_sender.send_transaction_status_freeze_message(&bank); - } - bank.freeze(); - heaviest_subtree_fork_choice - 
.add_new_leaf_slot(bank.slot(), Some(bank.parent_slot())); - if let Some(sender) = bank_notification_sender { - sender - .send(BankNotification::Frozen(bank.clone())) - .unwrap_or_else(|err| { - warn!("bank_notification_sender failed: {:?}", err) - }); - } + bank_progress.replay_stats.report_stats( + bank.slot(), + bank_progress.replay_progress.num_entries, + bank_progress.replay_progress.num_shreds, + ); + did_complete_bank = true; + info!("bank frozen: {}", bank.slot()); + if let Some(transaction_status_sender) = transaction_status_sender { + transaction_status_sender.send_transaction_status_freeze_message(&bank); + } + bank.freeze(); + let bank_hash = bank.hash(); + assert_ne!(bank_hash, Hash::default()); + heaviest_subtree_fork_choice.add_new_leaf_slot( + (bank.slot(), bank.hash()), + Some((bank.parent_slot(), bank.parent_hash())), + ); + check_slot_agrees_with_cluster( + bank.slot(), + bank_forks.read().unwrap().root(), + Some(bank.hash()), + duplicate_slots_tracker, + gossip_duplicate_confirmed_slots, + ancestors, + descendants, + progress, + heaviest_subtree_fork_choice, + SlotStateUpdate::Frozen, + ); + if let Some(sender) = bank_notification_sender { + sender + .send(BankNotification::Frozen(bank.clone())) + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err)); + } + blockstore_processor::cache_block_meta(&bank, cache_block_meta_sender); - Self::record_rewards(&bank, &rewards_recorder_sender); - Self::record_evm_block(&bank, &subscriptions, &evm_block_recorder_sender); - } else { - Self::mark_dead_slot( - blockstore, - bank_progress, - bank.slot(), - &BlockstoreProcessorError::InvalidBlock(BlockError::DuplicateBlock), - true, - subscriptions, - ); - warn!( - "{} duplicate shreds detected, not freezing bank {}", - my_pubkey, - bank.slot() - ); + let bank_hash = bank.hash(); + if let Some(new_frozen_voters) = + unfrozen_gossip_verified_vote_hashes.remove_slot_hash(bank.slot(), &bank_hash) + { + for pubkey in new_frozen_voters { + 
latest_validator_votes_for_frozen_banks.check_add_vote( + pubkey, + bank.slot(), + Some(bank_hash), + false, + ); + } } + Self::record_evm_block(&bank, &subscriptions, &evm_block_recorder_sender); + Self::record_rewards(&bank, &rewards_recorder_sender); } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -1450,7 +1836,7 @@ impl ReplayStage { #[allow(clippy::too_many_arguments)] pub(crate) fn compute_bank_stats( - my_pubkey: &Pubkey, + my_vote_pubkey: &Pubkey, ancestors: &HashMap>, frozen_banks: &mut Vec>, tower: &Tower, @@ -1458,7 +1844,8 @@ impl ReplayStage { vote_tracker: &VoteTracker, cluster_slots: &ClusterSlots, bank_forks: &RwLock, - heaviest_subtree_fork_choice: &mut dyn ForkChoice, + heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, ) -> Vec { frozen_banks.sort_by_key(|bank| bank.slot()); let mut new_stats = vec![]; @@ -1474,23 +1861,25 @@ impl ReplayStage { .computed; if !is_computed { let computed_bank_state = Tower::collect_vote_lockouts( - my_pubkey, + my_vote_pubkey, bank_slot, bank.vote_accounts().into_iter(), &ancestors, + |slot| progress.get_hash(slot), + latest_validator_votes_for_frozen_banks, ); // Notify any listeners of the votes found in this newly computed // bank heaviest_subtree_fork_choice.compute_bank_stats( &bank, tower, - progress, - &computed_bank_state, + latest_validator_votes_for_frozen_banks, ); let ComputedBankState { voted_stakes, total_stake, lockout_intervals, + my_latest_landed_vote, .. 
} = computed_bank_state; let stats = progress @@ -1500,6 +1889,8 @@ impl ReplayStage { stats.voted_stakes = voted_stakes; stats.lockout_intervals = lockout_intervals; stats.block_height = bank.block_height(); + stats.bank_hash = Some(bank.hash()); + stats.my_latest_landed_vote = my_latest_landed_vote; stats.computed = true; new_stats.push(bank_slot); datapoint_info!( @@ -1510,7 +1901,7 @@ impl ReplayStage { ); info!( "{} slot_weight: {} {} {} {}", - my_pubkey, + my_vote_pubkey, bank_slot, stats.weight, stats.fork_weight, @@ -1583,7 +1974,9 @@ impl ReplayStage { let newly_voted_pubkeys = slot_vote_tracker .as_ref() - .and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates()) + .and_then(|slot_vote_tracker| { + slot_vote_tracker.write().unwrap().get_voted_slot_updates() + }) .unwrap_or_default(); let cluster_slot_pubkeys = cluster_slot_pubkeys @@ -1604,7 +1997,8 @@ impl ReplayStage { // a bank to vote on, a bank to reset to, pub(crate) fn select_vote_and_reset_forks( heaviest_bank: &Arc, - heaviest_bank_on_same_voted_fork: &Option>, + // Should only be None if there was no previous vote + heaviest_bank_on_same_voted_fork: Option<&Arc>, ancestors: &HashMap>, descendants: &HashMap>, progress: &ProgressMap, @@ -1635,25 +2029,90 @@ impl ReplayStage { .epoch_vote_accounts(heaviest_bank.epoch()) .expect("Bank epoch vote accounts must contain entry for the bank's own epoch"), ); - if let SwitchForkDecision::FailedSwitchThreshold(_, _) = switch_fork_decision { - // If we can't switch, then reset to the the next votable - // bank on the same fork as our last vote, but don't vote - info!( - "Waiting to switch vote to {}, resetting to slot {:?} on same fork for now", - heaviest_bank.slot(), - heaviest_bank_on_same_voted_fork.as_ref().map(|b| b.slot()) - ); - failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( - heaviest_bank.slot(), - )); - heaviest_bank_on_same_voted_fork - .as_ref() - .map(|b| (b, switch_fork_decision)) - } else { - // If 
the switch threshold is observed, halt voting on - // the current fork and attempt to vote/reset Poh to - // the heaviest bank - Some((heaviest_bank, switch_fork_decision)) + + match switch_fork_decision { + SwitchForkDecision::FailedSwitchThreshold(_, _) => { + let reset_bank = heaviest_bank_on_same_voted_fork; + // If we can't switch and our last vote was on a non-duplicate/confirmed slot, then + // reset to the the next votable bank on the same fork as our last vote, + // but don't vote. + + // We don't just reset to the heaviest fork when switch threshold fails because + // a situation like this can occur: + + /* Figure 1: + slot 0 + | + slot 1 + / \ + slot 2 (last vote) | + | slot 8 (10%) + slot 4 (9%) + */ + + // Imagine 90% of validators voted on slot 4, but only 9% landed. If everybody that fails + // the switch theshold abandons slot 4 to build on slot 8 (because it's *currently* heavier), + // then there will be no blocks to include the votes for slot 4, and the network halts + // because 90% of validators can't vote + info!( + "Waiting to switch vote to {}, resetting to slot {:?} for now", + heaviest_bank.slot(), + reset_bank.as_ref().map(|b| b.slot()), + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank.slot(), + )); + reset_bank.map(|b| (b, switch_fork_decision)) + } + SwitchForkDecision::FailedSwitchDuplicateRollback(latest_duplicate_ancestor) => { + // If we can't switch and our last vote was on an unconfirmed, duplicate slot, + // then we need to reset to the heaviest bank, even if the heaviest bank is not + // a descendant of the last vote (usually for switch threshold failures we reset + // to the heaviest descendant of the last vote, but in this case, the last vote + // was on a duplicate branch). 
This is because in the case of *unconfirmed* duplicate + // slots, somebody needs to generate an alternative branch to escape a situation + // like a 50-50 split where both partitions have voted on different versions of the + // same duplicate slot. + + // Unlike the situation described in `Figure 1` above, this is safe. To see why, + // imagine the same situation described in Figure 1 above occurs, but slot 2 is + // a duplicate block. There are now a few cases: + // + // Note first that DUPLICATE_THRESHOLD + SWITCH_FORK_THRESHOLD + DUPLICATE_LIVENESS_THRESHOLD = 1; + // + // 1) > DUPLICATE_THRESHOLD of the network voted on some version of slot 2. Because duplicate slots can be confirmed + // by gossip, unlike the situation described in `Figure 1`, we don't need those + // votes to land in a descendant to confirm slot 2. Once slot 2 is confirmed by + // gossip votes, that fork is added back to the fork choice set and falls back into + // normal fork choice, which is covered by the `FailedSwitchThreshold` case above + // (everyone will resume building on their last voted fork, slot 4, since slot 8 + // doesn't have for switch threshold) + // + // 2) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, > SWITCH_FORK_THRESHOLD of the network voted + // on slot 8. Then everybody abandons the duplicate fork from fork choice and both builds + // on slot 8's fork. They can also vote on slot 8's fork because it has sufficient weight + // to pass the switching threshold + // + // 3) <= DUPLICATE_THRESHOLD of the network voted on some version of slot 2, <= SWITCH_FORK_THRESHOLD of the network voted + // on slot 8. This means more than DUPLICATE_LIVENESS_THRESHOLD of the network is gone, so we cannot + // guarantee progress anyways + + // Note the heaviest fork is never descended from a known unconfirmed duplicate slot + // because the fork choice rule ensures that (marks it as an invalid candidate), + // thus it's safe to use as the reset bank. 
+ let reset_bank = Some(heaviest_bank); + info!( + "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", + heaviest_bank.slot(), + reset_bank.as_ref().map(|b| b.slot()), + latest_duplicate_ancestor, + ); + failure_reasons.push(HeaviestForkFailures::FailedSwitchThreshold( + heaviest_bank.slot(), + )); + reset_bank.map(|b| (b, switch_fork_decision)) + } + _ => Some((heaviest_bank, switch_fork_decision)), } }; @@ -1833,6 +2292,49 @@ impl ReplayStage { did_newly_reach_threshold } + fn mark_slots_confirmed( + confirmed_forks: &[Slot], + bank_forks: &RwLock, + progress: &mut ProgressMap, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + ancestors: &HashMap>, + descendants: &HashMap>, + fork_choice: &mut HeaviestSubtreeForkChoice, + ) { + let (root_slot, bank_hashes) = { + let r_bank_forks = bank_forks.read().unwrap(); + let bank_hashes: Vec> = confirmed_forks + .iter() + .map(|slot| r_bank_forks.get(*slot).map(|bank| bank.hash())) + .collect(); + + (r_bank_forks.root(), bank_hashes) + }; + for (slot, bank_hash) in confirmed_forks.iter().zip(bank_hashes.into_iter()) { + // This case should be guaranteed as false by confirm_forks() + if let Some(false) = progress.is_supermajority_confirmed(*slot) { + // Because supermajority confirmation will iterate through all ancestors/descendants + // in `check_slot_agrees_with_cluster`, only incur this cost if the slot wasn't already + // confirmed + progress.set_supermajority_confirmed_slot(*slot); + check_slot_agrees_with_cluster( + *slot, + root_slot, + bank_hash, + duplicate_slots_tracker, + // Don't need to pass the gossip confirmed slots since `slot` + // is already marked as confirmed in progress + &BTreeMap::new(), + ancestors, + descendants, + progress, + fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + } + } + } + fn confirm_forks( tower: &Tower, voted_stakes: &VotedStakes, @@ -1842,7 +2344,7 @@ impl ReplayStage { ) -> Vec { let mut confirmed_forks = vec![]; for 
(slot, prog) in progress.iter() { - if !prog.fork_stats.confirmation_reported { + if !prog.fork_stats.is_supermajority_confirmed { let bank = bank_forks .read() .unwrap() @@ -1867,6 +2369,7 @@ impl ReplayStage { confirmed_forks } + #[allow(clippy::too_many_arguments)] pub(crate) fn handle_new_root( new_root: Slot, bank_forks: &RwLock, @@ -1874,6 +2377,11 @@ impl ReplayStage { accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + duplicate_slots_tracker: &mut DuplicateSlotsTracker, + gossip_duplicate_confirmed_slots: &mut GossipDuplicateConfirmedSlots, + unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, + has_new_vote_been_rooted: &mut bool, + voted_signatures: &mut Vec, ) { bank_forks.write().unwrap().set_root( new_root, @@ -1881,8 +2389,29 @@ impl ReplayStage { highest_confirmed_root, ); let r_bank_forks = bank_forks.read().unwrap(); + let new_root_bank = &r_bank_forks[new_root]; + if !*has_new_vote_been_rooted { + for signature in voted_signatures.iter() { + if new_root_bank.get_signature_status(signature).is_some() { + *has_new_vote_been_rooted = true; + break; + } + } + if *has_new_vote_been_rooted { + std::mem::take(voted_signatures); + } + } progress.handle_new_root(&r_bank_forks); - heaviest_subtree_fork_choice.set_root(new_root); + heaviest_subtree_fork_choice.set_root((new_root, r_bank_forks.root_bank().hash())); + let mut slots_ge_root = duplicate_slots_tracker.split_off(&new_root); + // duplicate_slots_tracker now only contains entries >= `new_root` + std::mem::swap(duplicate_slots_tracker, &mut slots_ge_root); + + let mut slots_ge_root = gossip_duplicate_confirmed_slots.split_off(&new_root); + // gossip_confirmed_slots now only contains entries >= `new_root` + std::mem::swap(gossip_duplicate_confirmed_slots, &mut slots_ge_root); + + unfrozen_gossip_verified_vote_hashes.set_root(new_root); } fn generate_new_bank_forks( @@ -1993,33 
+2522,14 @@ impl ReplayStage { } } - fn cache_block_times( - blockstore: &Arc, - bank_forks: &Arc>, - rooted_slots: &[Slot], - cache_block_time_sender: &Option, - ) { - if let Some(cache_block_time_sender) = cache_block_time_sender { - for slot in rooted_slots { - if blockstore - .get_block_time(*slot) - .unwrap_or_default() - .is_none() - { - if let Some(rooted_bank) = bank_forks.read().unwrap().get(*slot) { - cache_block_time_sender - .send(rooted_bank.clone()) - .unwrap_or_else(|err| { - warn!("cache_block_time_sender failed: {:?}", err) - }); - } else { - error!( - "rooted_bank {:?} not available in BankForks; block time not cached", - slot - ); - } - } - } + pub fn get_unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot { + match cluster_type { + ClusterType::Development => 0, + ClusterType::Devnet => 0, + // Epoch 63 + ClusterType::Testnet => 21_692_256, + // 400_000 slots into epoch 61 + ClusterType::MainnetBeta => 26_752_000, } } @@ -2033,8 +2543,10 @@ impl ReplayStage { pub(crate) mod tests { use super::*; use crate::{ + cluster_info::Node, consensus::test::{initialize_state, VoteSimulator}, consensus::Tower, + crds::Cursor, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, progress_map::ValidatorStakeInfo, replay_stage::ReplayStage, @@ -2044,7 +2556,7 @@ pub(crate) mod tests { use solana_ledger::{ blockstore::make_slot_entries, blockstore::{entries_to_test_shreds, BlockstoreError}, - create_new_tmp_ledger, + blockstore_processor, create_new_tmp_ledger, entry::{self, next_entry, Entry}, genesis_utils::{create_genesis_config, create_genesis_config_with_leader}, get_tmp_ledger_path, @@ -2064,6 +2576,7 @@ pub(crate) mod tests { hash::{hash, Hash}, instruction::InstructionError, packet::PACKET_DATA_SIZE, + poh_config::PohConfig, signature::{Keypair, Signature, Signer}, system_transaction, transaction::TransactionError, @@ -2082,7 +2595,7 @@ pub(crate) mod tests { #[test] fn test_is_partition_detected() { - let (bank_forks, _) = 
setup_forks(); + let VoteSimulator { bank_forks, .. } = setup_forks(); let ancestors = bank_forks.read().unwrap().ancestors(); // Last vote 1 is an ancestor of the heaviest slot 3, no partition assert!(!ReplayStage::is_partition_detected(&ancestors, 1, 3)); @@ -2098,10 +2611,15 @@ pub(crate) mod tests { struct ReplayBlockstoreComponents { blockstore: Arc, - validator_voting_keys: HashMap, + validator_node_to_vote_keys: HashMap, + validator_authorized_voter_keypairs: HashMap, + my_vote_pubkey: Pubkey, progress: ProgressMap, - bank_forks: Arc>, + cluster_info: ClusterInfo, leader_schedule_cache: Arc, + poh_recorder: Mutex, + bank_forks: Arc>, + tower: Tower, rpc_subscriptions: Arc, } @@ -2114,10 +2632,11 @@ pub(crate) mod tests { let validator_authorized_voter_keypairs: Vec<_> = (0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); - let validator_voting_keys: HashMap<_, _> = validator_authorized_voter_keypairs - .iter() - .map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey())) - .collect(); + let validator_node_to_vote_keys: HashMap = + validator_authorized_voter_keypairs + .iter() + .map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey())) + .collect(); let GenesisConfigInfo { genesis_config, .. 
} = genesis_utils::create_genesis_config_with_vote_accounts( 10_000, @@ -2136,17 +2655,53 @@ pub(crate) mod tests { bank0.collector_id(), &Pubkey::default(), None, + DuplicateStats::default(), 0, 0, ), ); + // ClusterInfo + let my_keypairs = &validator_authorized_voter_keypairs[0]; + let my_pubkey = my_keypairs.node_keypair.pubkey(); + let cluster_info = ClusterInfo::new( + Node::new_localhost_with_pubkey(&my_pubkey).info, + Arc::new(Keypair::from_bytes(&my_keypairs.node_keypair.to_bytes()).unwrap()), + ); + assert_eq!(my_pubkey, cluster_info.id()); + // Leader schedule cache let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0)); + // PohRecorder + let poh_recorder = Mutex::new( + PohRecorder::new( + bank0.tick_height(), + bank0.last_blockhash(), + bank0.slot(), + None, + bank0.ticks_per_slot(), + &Pubkey::default(), + &blockstore, + &leader_schedule_cache, + &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::new(false)), + ) + .0, + ); + // BankForks let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0))); + // Tower + let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); + let tower = Tower::new_from_bankforks( + &bank_forks.read().unwrap(), + &ledger_path, + &cluster_info.id(), + &my_vote_pubkey, + ); + // RpcSubscriptions let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); @@ -2158,12 +2713,23 @@ pub(crate) mod tests { optimistically_confirmed_bank, )); + let validator_authorized_voter_keypairs: HashMap = + validator_authorized_voter_keypairs + .into_iter() + .map(|keys| (keys.vote_keypair.pubkey(), keys)) + .collect(); + ReplayBlockstoreComponents { blockstore, - validator_voting_keys, + validator_node_to_vote_keys, + validator_authorized_voter_keypairs, + my_vote_pubkey, progress, - bank_forks, + cluster_info, leader_schedule_cache, + poh_recorder, + bank_forks, + tower, rpc_subscriptions, } } @@ -2172,11 +2738,12 @@ pub(crate) mod tests { fn 
test_child_slots_of_same_parent() { let ReplayBlockstoreComponents { blockstore, - validator_voting_keys, + validator_node_to_vote_keys, mut progress, bank_forks, leader_schedule_cache, rpc_subscriptions, + .. } = replay_blockstore_components(); // Insert a non-root bank so that the propagation logic will update this @@ -2191,8 +2758,11 @@ pub(crate) mod tests { ForkProgress::new_from_bank( &bank1, bank1.collector_id(), - validator_voting_keys.get(&bank1.collector_id()).unwrap(), + validator_node_to_vote_keys + .get(&bank1.collector_id()) + .unwrap(), Some(0), + DuplicateStats::default(), 0, 0, ), @@ -2260,7 +2830,7 @@ pub(crate) mod tests { ]; for slot in expected_leader_slots { let leader = leader_schedule_cache.slot_leader_at(slot, None).unwrap(); - let vote_key = validator_voting_keys.get(&leader).unwrap(); + let vote_key = validator_node_to_vote_keys.get(&leader).unwrap(); assert!(progress .get_propagated_stats(1) .unwrap() @@ -2274,18 +2844,41 @@ pub(crate) mod tests { let genesis_config = create_genesis_config(10_000).genesis_config; let bank0 = Bank::new(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0))); + let root = 3; - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(root); let root_bank = Bank::new_from_parent( bank_forks.read().unwrap().get(0).unwrap(), &Pubkey::default(), root, ); + root_bank.freeze(); + let root_hash = root_bank.hash(); bank_forks.write().unwrap().insert(root_bank); + + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); + let mut progress = ProgressMap::default(); for i in 0..=root { - progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); + progress.insert( + i, + ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), + ); } + + let mut duplicate_slots_tracker: DuplicateSlotsTracker = + vec![root - 1, root, root + 1].into_iter().collect(); + let mut gossip_duplicate_confirmed_slots: 
GossipDuplicateConfirmedSlots = + vec![root - 1, root, root + 1] + .into_iter() + .map(|s| (s, Hash::default())) + .collect(); + let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = + UnfrozenGossipVerifiedVoteHashes { + votes_per_slot: vec![root - 1, root, root + 1] + .into_iter() + .map(|s| (s, HashMap::new())) + .collect(), + }; ReplayStage::handle_new_root( root, &bank_forks, @@ -2293,10 +2886,35 @@ pub(crate) mod tests { &AbsRequestSender::default(), None, &mut heaviest_subtree_fork_choice, + &mut duplicate_slots_tracker, + &mut gossip_duplicate_confirmed_slots, + &mut unfrozen_gossip_verified_vote_hashes, + &mut true, + &mut Vec::new(), ); assert_eq!(bank_forks.read().unwrap().root(), root); assert_eq!(progress.len(), 1); assert!(progress.get(&root).is_some()); + // root - 1 is filtered out + assert_eq!( + duplicate_slots_tracker.into_iter().collect::>(), + vec![root, root + 1] + ); + assert_eq!( + gossip_duplicate_confirmed_slots + .keys() + .cloned() + .collect::>(), + vec![root, root + 1] + ); + assert_eq!( + unfrozen_gossip_verified_vote_hashes + .votes_per_slot + .keys() + .cloned() + .collect::>(), + vec![root, root + 1] + ); } #[test] @@ -2324,11 +2942,16 @@ pub(crate) mod tests { &Pubkey::default(), root, ); + root_bank.freeze(); + let root_hash = root_bank.hash(); bank_forks.write().unwrap().insert(root_bank); - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new(root); + let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); let mut progress = ProgressMap::default(); for i in 0..=root { - progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); + progress.insert( + i, + ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), + ); } ReplayStage::handle_new_root( root, @@ -2337,6 +2960,11 @@ pub(crate) mod tests { &AbsRequestSender::default(), Some(confirmed_root), &mut heaviest_subtree_fork_choice, + &mut 
DuplicateSlotsTracker::default(), + &mut GossipDuplicateConfirmedSlots::default(), + &mut UnfrozenGossipVerifiedVoteHashes::default(), + &mut true, + &mut Vec::new(), ); assert_eq!(bank_forks.read().unwrap().root(), root); assert!(bank_forks.read().unwrap().get(confirmed_root).is_some()); @@ -2534,6 +3162,8 @@ pub(crate) mod tests { let gibberish = [0xa5u8; PACKET_DATA_SIZE]; let mut data_header = DataShredHeader::default(); data_header.flags |= DATA_COMPLETE_SHRED; + // Need to provide the right size for Shredder::deshred. + data_header.size = SIZE_OF_DATA_SHRED_PAYLOAD as u16; let mut shred = Shred::new_empty_from_header( ShredCommonHeader::default(), data_header, @@ -2578,9 +3208,9 @@ pub(crate) mod tests { let bank0 = bank_forks.working_bank(); let mut progress = ProgressMap::default(); let last_blockhash = bank0.last_blockhash(); - let mut bank0_progress = progress - .entry(bank0.slot()) - .or_insert_with(|| ForkProgress::new(last_blockhash, None, None, 0, 0)); + let mut bank0_progress = progress.entry(bank0.slot()).or_insert_with(|| { + ForkProgress::new(last_blockhash, None, DuplicateStats::default(), None, 0, 0) + }); let shreds = shred_to_insert(&mint_keypair, bank0.clone()); blockstore.insert_shreds(shreds, None, false).unwrap(); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); @@ -2601,6 +3231,28 @@ pub(crate) mod tests { )), ); + let subscriptions = Arc::new(RpcSubscriptions::new( + &exit, + bank_forks.clone(), + block_commitment_cache, + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), + )); + if let Err(err) = &res { + ReplayStage::mark_dead_slot( + &blockstore, + &bank0, + 0, + err, + &subscriptions, + &mut DuplicateSlotsTracker::default(), + &GossipDuplicateConfirmedSlots::default(), + &HashMap::new(), + &HashMap::new(), + &mut progress, + &mut HeaviestSubtreeForkChoice::new((0, Hash::default())), + ); + } + // Check that the erroring bank was marked as dead in the progress map 
assert!(progress .get(&bank0.slot()) @@ -2687,7 +3339,19 @@ pub(crate) mod tests { arc_bank.freeze(); } - thread::sleep(Duration::from_millis(200)); + for _ in 0..10 { + let done = { + let bcc = block_commitment_cache.read().unwrap(); + bcc.get_block_commitment(0).is_some() + && bcc.get_block_commitment(1).is_some() + && bcc.get_block_commitment(2).is_some() + }; + if done { + break; + } else { + thread::sleep(Duration::from_millis(200)); + } + } let mut expected0 = BlockCommitment::default(); expected0.increase_confirmation_stake(3, leader_lamports); @@ -2771,7 +3435,7 @@ pub(crate) mod tests { &bank, &mut entries, true, - Some(TransactionStatusSender { + Some(&TransactionStatusSender { sender: transaction_status_sender, enable_cpi_and_log_storage: false, }), @@ -2845,13 +3509,16 @@ pub(crate) mod tests { #[test] fn test_compute_bank_stats_confirmed() { let vote_keypairs = ValidatorVoteKeypairs::new_rand(); - let node_pubkey = vote_keypairs.node_keypair.pubkey(); - let keypairs: HashMap<_, _> = vec![(node_pubkey, vote_keypairs)].into_iter().collect(); + let my_node_pubkey = vote_keypairs.node_keypair.pubkey(); + let my_vote_pubkey = vote_keypairs.vote_keypair.pubkey(); + let keypairs: HashMap<_, _> = vec![(my_node_pubkey, vote_keypairs)].into_iter().collect(); let (bank_forks, mut progress, mut heaviest_subtree_fork_choice) = initialize_state(&keypairs, 10_000); + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); let bank0 = bank_forks.get(0).unwrap().clone(); - let my_keypairs = keypairs.get(&node_pubkey).unwrap(); + let my_keypairs = keypairs.get(&my_node_pubkey).unwrap(); let vote_tx = vote_transaction::new_vote_transaction( vec![0], bank0.hash(), @@ -2863,7 +3530,7 @@ pub(crate) mod tests { ); let bank_forks = RwLock::new(bank_forks); - let bank1 = Bank::new_from_parent(&bank0, &node_pubkey, 1); + let bank1 = Bank::new_from_parent(&bank0, &my_node_pubkey, 1); bank1.process_transaction(&vote_tx).unwrap(); 
bank1.freeze(); @@ -2878,7 +3545,7 @@ pub(crate) mod tests { .collect(); let tower = Tower::new_for_tests(0, 0.67); let newly_computed = ReplayStage::compute_bank_stats( - &node_pubkey, + &my_vote_pubkey, &ancestors, &mut frozen_banks, &tower, @@ -2887,11 +3554,11 @@ pub(crate) mod tests { &ClusterSlots::default(), &bank_forks, &mut heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); // bank 0 has no votes, should not send any votes on the channel assert_eq!(newly_computed, vec![0]); - // The only vote is in bank 1, and bank_forks does not currently contain // bank 1, so no slot should be confirmed. { @@ -2904,14 +3571,21 @@ pub(crate) mod tests { &bank_forks, ); - assert!(confirmed_forks.is_empty()) + assert!(confirmed_forks.is_empty()); } // Insert the bank that contains a vote for slot 0, which confirms slot 0 bank_forks.write().unwrap().insert(bank1); progress.insert( 1, - ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0), + ForkProgress::new( + bank0.last_blockhash(), + None, + DuplicateStats::default(), + None, + 0, + 0, + ), ); let ancestors = bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = bank_forks @@ -2922,7 +3596,7 @@ pub(crate) mod tests { .cloned() .collect(); let newly_computed = ReplayStage::compute_bank_stats( - &node_pubkey, + &my_vote_pubkey, &ancestors, &mut frozen_banks, &tower, @@ -2931,6 +3605,7 @@ pub(crate) mod tests { &ClusterSlots::default(), &bank_forks, &mut heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); // Bank 1 had one vote @@ -2944,6 +3619,7 @@ pub(crate) mod tests { &progress, &bank_forks, ); + // No new stats should have been computed assert_eq!(confirmed_forks, vec![0]); } @@ -2956,7 +3632,7 @@ pub(crate) mod tests { .cloned() .collect(); let newly_computed = ReplayStage::compute_bank_stats( - &node_pubkey, + &my_vote_pubkey, &ancestors, &mut frozen_banks, &tower, @@ -2965,6 +3641,7 @@ pub(crate) mod tests { &ClusterSlots::default(), 
&bank_forks, &mut heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); // No new stats should have been computed assert!(newly_computed.is_empty()); @@ -2974,13 +3651,12 @@ pub(crate) mod tests { fn test_same_weight_select_lower_slot() { // Init state let mut vote_simulator = VoteSimulator::new(1); - let node_pubkey = vote_simulator.node_pubkeys[0]; - let tower = Tower::new_with_key(&node_pubkey); + let my_node_pubkey = vote_simulator.node_pubkeys[0]; + let tower = Tower::new_with_key(&my_node_pubkey); // Create the tree of banks in a BankForks object let forks = tr(0) / (tr(1)) / (tr(2)); - vote_simulator.fill_bank_forks(forks.clone(), &HashMap::new()); - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks); + vote_simulator.fill_bank_forks(forks, &HashMap::new()); let mut frozen_banks: Vec<_> = vote_simulator .bank_forks .read() @@ -2989,10 +3665,14 @@ pub(crate) mod tests { .values() .cloned() .collect(); - + let mut heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice; + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); + + let my_vote_pubkey = vote_simulator.vote_pubkeys[0]; ReplayStage::compute_bank_stats( - &node_pubkey, + &my_vote_pubkey, &ancestors, &mut frozen_banks, &tower, @@ -3001,11 +3681,30 @@ pub(crate) mod tests { &ClusterSlots::default(), &vote_simulator.bank_forks, &mut heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); + let bank1 = vote_simulator + .bank_forks + .read() + .unwrap() + .get(1) + .unwrap() + .clone(); + let bank2 = vote_simulator + .bank_forks + .read() + .unwrap() + .get(2) + .unwrap() + .clone(); assert_eq!( - heaviest_subtree_fork_choice.stake_voted_subtree(1).unwrap(), - heaviest_subtree_fork_choice.stake_voted_subtree(2).unwrap() + heaviest_subtree_fork_choice + .stake_voted_subtree(&(1, 
bank1.hash())) + .unwrap(), + heaviest_subtree_fork_choice + .stake_voted_subtree(&(2, bank2.hash())) + .unwrap() ); let (heaviest_bank, _) = heaviest_subtree_fork_choice.select_forks( @@ -3024,8 +3723,8 @@ pub(crate) mod tests { fn test_child_bank_heavier() { // Init state let mut vote_simulator = VoteSimulator::new(1); - let node_pubkey = vote_simulator.node_pubkeys[0]; - let mut tower = Tower::new_with_key(&node_pubkey); + let my_node_pubkey = vote_simulator.node_pubkeys[0]; + let mut tower = Tower::new_with_key(&my_node_pubkey); // Create the tree of banks in a BankForks object let forks = tr(0) / (tr(1) / (tr(2) / (tr(3)))); @@ -3033,13 +3732,13 @@ pub(crate) mod tests { // Set the voting behavior let mut cluster_votes = HashMap::new(); let votes = vec![0, 2]; - cluster_votes.insert(node_pubkey, votes.clone()); + cluster_votes.insert(my_node_pubkey, votes.clone()); vote_simulator.fill_bank_forks(forks, &cluster_votes); // Fill banks with votes for vote in votes { assert!(vote_simulator - .simulate_vote(vote, &node_pubkey, &mut tower,) + .simulate_vote(vote, &my_node_pubkey, &mut tower,) .is_empty()); } @@ -3052,8 +3751,9 @@ pub(crate) mod tests { .cloned() .collect(); + let my_vote_pubkey = vote_simulator.vote_pubkeys[0]; ReplayStage::compute_bank_stats( - &node_pubkey, + &my_vote_pubkey, &vote_simulator.bank_forks.read().unwrap().ancestors(), &mut frozen_banks, &tower, @@ -3062,6 +3762,7 @@ pub(crate) mod tests { &ClusterSlots::default(), &vote_simulator.bank_forks, &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, ); frozen_banks.sort_by_key(|bank| bank.slot()); @@ -3083,8 +3784,9 @@ pub(crate) mod tests { assert_eq!( vote_simulator .heaviest_subtree_fork_choice - .best_slot(bank.slot()) - .unwrap(), + .best_slot(&(bank.slot(), bank.hash())) + .unwrap() + .0, 3 ); } @@ -3310,6 +4012,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(9), + DuplicateStats::default(), 
Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -3323,6 +4026,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(8), + DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -3359,7 +4063,7 @@ pub(crate) mod tests { .unwrap() .write() .unwrap() - .get_updates() + .get_voted_slot_updates() .is_none()); // The voter should be recorded @@ -3405,6 +4109,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), Some(prev_leader_slot), + DuplicateStats::default(), { if i % 2 == 0 { Some(ValidatorStakeInfo { @@ -3484,6 +4189,7 @@ pub(crate) mod tests { let mut fork_progress = ForkProgress::new( Hash::default(), Some(prev_leader_slot), + DuplicateStats::default(), Some(ValidatorStakeInfo { total_epoch_stake, ..ValidatorStakeInfo::default() @@ -3543,7 +4249,7 @@ pub(crate) mod tests { // should succeed progress_map.insert( parent_slot, - ForkProgress::new(Hash::default(), None, None, 0, 0), + ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0), ); assert!(ReplayStage::check_propagation_for_start_leader( poh_slot, @@ -3559,6 +4265,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -3585,13 +4292,21 @@ pub(crate) mod tests { let previous_leader_slot = parent_slot - 1; progress_map.insert( parent_slot, - ForkProgress::new(Hash::default(), Some(previous_leader_slot), None, 0, 0), + ForkProgress::new( + Hash::default(), + Some(previous_leader_slot), + DuplicateStats::default(), + None, + 0, + 0, + ), ); progress_map.insert( previous_leader_slot, ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -3652,6 +4367,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -3687,6 +4403,7 @@ pub(crate) mod 
tests { ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -3710,6 +4427,7 @@ pub(crate) mod tests { ForkProgress::new( Hash::default(), None, + DuplicateStats::default(), Some(ValidatorStakeInfo::default()), 0, 0, @@ -3724,7 +4442,11 @@ pub(crate) mod tests { #[test] fn test_purge_unconfirmed_duplicate_slot() { - let (bank_forks, mut progress) = setup_forks(); + let VoteSimulator { + bank_forks, + mut progress, + .. + } = setup_forks(); let mut descendants = bank_forks.read().unwrap().descendants().clone(); let mut ancestors = bank_forks.read().unwrap().ancestors(); @@ -3784,7 +4506,7 @@ pub(crate) mod tests { #[test] fn test_purge_ancestors_descendants() { - let (bank_forks, _) = setup_forks(); + let VoteSimulator { bank_forks, .. } = setup_forks(); // Purge branch rooted at slot 2 let mut descendants = bank_forks.read().unwrap().descendants().clone(); @@ -3837,7 +4559,7 @@ pub(crate) mod tests { #[test] fn test_leader_snapshot_restart_propagation() { let ReplayBlockstoreComponents { - validator_voting_keys, + validator_node_to_vote_keys, mut progress, bank_forks, leader_schedule_cache, @@ -3872,7 +4594,7 @@ pub(crate) mod tests { let vote_tracker = VoteTracker::default(); // Add votes - for vote_key in validator_voting_keys.values() { + for vote_key in validator_node_to_vote_keys.values() { vote_tracker.insert_vote(root_bank.slot(), *vote_key); } @@ -3881,7 +4603,7 @@ pub(crate) mod tests { // Update propagation status let tower = Tower::new_for_tests(0, 0.67); ReplayStage::compute_bank_stats( - &my_pubkey, + &validator_node_to_vote_keys[&my_pubkey], &ancestors, &mut frozen_banks, &tower, @@ -3890,15 +4612,483 @@ pub(crate) mod tests { &ClusterSlots::default(), &bank_forks, &mut HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks.read().unwrap()), + &mut LatestValidatorVotesForFrozenBanks::default(), ); // Check status is true assert!(progress.is_propagated(root_bank.slot())); } - fn 
setup_forks() -> (RwLock, ProgressMap) { + #[test] + fn test_unconfirmed_duplicate_slots_and_lockouts() { + /* + Build fork structure: + + slot 0 + | + slot 1 + / \ + slot 2 | + | | + slot 3 | + | | + slot 4 | + slot 5 + | + slot 6 + */ + let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4)))) / (tr(5) / (tr(6)))); + + // Make enough validators for vote switch thrshold later + let mut vote_simulator = VoteSimulator::new(2); + let validator_votes: HashMap> = vec![ + (vote_simulator.node_pubkeys[0], vec![5]), + (vote_simulator.node_pubkeys[1], vec![2]), + ] + .into_iter() + .collect(); + vote_simulator.fill_bank_forks(forks, &validator_votes); + + let (bank_forks, mut progress) = (vote_simulator.bank_forks, vote_simulator.progress); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Arc::new( + Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"), + ); + let mut tower = Tower::new_for_tests(8, 0.67); + + // All forks have same weight so heaviest bank to vote/reset on should be the tip of + // the fork with the lower slot + let (vote_fork, reset_fork) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + ); + assert_eq!(vote_fork.unwrap(), 4); + assert_eq!(reset_fork.unwrap(), 4); + + // Record the vote for 4 + tower.record_bank_vote( + &bank_forks.read().unwrap().get(4).unwrap(), + &Pubkey::default(), + ); + + // Mark 4 as duplicate, 3 should be the heaviest slot, but should not be votable + // because of lockout + blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap(); + let ancestors = bank_forks.read().unwrap().ancestors(); + let descendants = bank_forks.read().unwrap().descendants().clone(); + let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default(); + let bank4_hash = 
bank_forks.read().unwrap().get(4).unwrap().hash(); + assert_ne!(bank4_hash, Hash::default()); + check_slot_agrees_with_cluster( + 4, + bank_forks.read().unwrap().root(), + Some(bank4_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut vote_simulator.heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + + let (vote_fork, reset_fork) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + ); + assert!(vote_fork.is_none()); + assert_eq!(reset_fork.unwrap(), 3); + + // Now mark 2, an ancestor of 4, as duplicate + blockstore.store_duplicate_slot(2, vec![], vec![]).unwrap(); + let ancestors = bank_forks.read().unwrap().ancestors(); + let descendants = bank_forks.read().unwrap().descendants().clone(); + let bank2_hash = bank_forks.read().unwrap().get(2).unwrap().hash(); + assert_ne!(bank2_hash, Hash::default()); + check_slot_agrees_with_cluster( + 2, + bank_forks.read().unwrap().root(), + Some(bank2_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut vote_simulator.heaviest_subtree_fork_choice, + SlotStateUpdate::Duplicate, + ); + + let (vote_fork, reset_fork) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + ); + + // Should now pick the next heaviest fork that is not a descendant of 2, which is 6. 
+ // However the lockout from vote 4 should still apply, so 6 should not be votable + assert!(vote_fork.is_none()); + assert_eq!(reset_fork.unwrap(), 6); + + // If slot 4 is marked as confirmed, then this confirms slot 2 and 4, and + // then slot 4 is now the heaviest bank again + gossip_duplicate_confirmed_slots.insert(4, bank4_hash); + check_slot_agrees_with_cluster( + 4, + bank_forks.read().unwrap().root(), + Some(bank4_hash), + &mut duplicate_slots_tracker, + &gossip_duplicate_confirmed_slots, + &ancestors, + &descendants, + &mut progress, + &mut vote_simulator.heaviest_subtree_fork_choice, + SlotStateUpdate::DuplicateConfirmed, + ); + let (vote_fork, reset_fork) = run_compute_and_select_forks( + &bank_forks, + &mut progress, + &mut tower, + &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.latest_validator_votes_for_frozen_banks, + ); + // Should now pick the heaviest fork 4 again, but lockouts apply so fork 4 + // is not votable, which avoids voting for 4 again. + assert!(vote_fork.is_none()); + assert_eq!(reset_fork.unwrap(), 4); + } + + #[test] + fn test_gossip_vote_doesnt_affect_fork_choice() { + let VoteSimulator { + bank_forks, + mut heaviest_subtree_fork_choice, + mut latest_validator_votes_for_frozen_banks, + vote_pubkeys, + .. 
+ } = setup_forks(); + + let vote_pubkey = vote_pubkeys[0]; + let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); + let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); + + // Best slot is 4 + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); + + // Cast a vote for slot 3 on one fork + let vote_slot = 3; + let vote_bank = bank_forks.read().unwrap().get(vote_slot).unwrap().clone(); + gossip_verified_vote_hash_sender + .send((vote_pubkey, vote_slot, vote_bank.hash())) + .expect("Send should succeed"); + ReplayStage::process_gossip_verified_vote_hashes( + &gossip_verified_vote_hash_receiver, + &mut unfrozen_gossip_verified_vote_hashes, + &heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, + ); + + // Pick the best fork. Gossip votes shouldn't affect fork choice + heaviest_subtree_fork_choice.compute_bank_stats( + &vote_bank, + &Tower::default(), + &mut latest_validator_votes_for_frozen_banks, + ); + + // Best slot is still 4 + assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); + } + + #[test] + fn test_replay_stage_refresh_last_vote() { + let ReplayBlockstoreComponents { + mut validator_authorized_voter_keypairs, + cluster_info, + poh_recorder, + bank_forks, + mut tower, + my_vote_pubkey, + .. 
+ } = replay_blockstore_components(); + + let mut last_vote_refresh_time = LastVoteRefreshTime { + last_refresh_time: Instant::now(), + last_print_time: Instant::now(), + }; + let has_new_vote_been_rooted = false; + let mut voted_signatures = vec![]; + + let my_vote_keypair = vec![Arc::new( + validator_authorized_voter_keypairs + .remove(&my_vote_pubkey) + .unwrap() + .vote_keypair, + )]; + let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); + + fn fill_bank_with_ticks(bank: &Bank) { + let parent_distance = bank.slot() - bank.parent_slot(); + for _ in 0..parent_distance { + let last_blockhash = bank.last_blockhash(); + while bank.last_blockhash() == last_blockhash { + bank.register_tick(&Hash::new_unique()) + } + } + } + + // Simulate landing a vote for slot 0 landing in slot 1 + let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); + fill_bank_with_ticks(&bank1); + tower.record_bank_vote(&bank0, &my_vote_pubkey); + ReplayStage::push_vote( + &cluster_info, + &bank0, + &poh_recorder, + &my_vote_pubkey, + &my_vote_keypair, + &mut tower, + &SwitchForkDecision::SameFork, + &mut voted_signatures, + has_new_vote_been_rooted, + ); + let mut cursor = Cursor::default(); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes.len(), 1); + let vote_tx = &votes[0]; + assert_eq!(vote_tx.message.recent_blockhash, bank0.last_blockhash()); + assert_eq!(tower.last_vote_tx_blockhash(), bank0.last_blockhash()); + assert_eq!(tower.last_voted_slot().unwrap(), 0); + bank1.process_transaction(vote_tx).unwrap(); + bank1.freeze(); + + // Trying to refresh the vote for bank 0 in bank 1 or bank 2 won't succeed because + // the last vote has landed already + let bank2 = Arc::new(Bank::new_from_parent(&bank1, &Pubkey::default(), 2)); + fill_bank_with_ticks(&bank2); + bank2.freeze(); + for refresh_bank in &[&bank1, &bank2] { + ReplayStage::refresh_last_vote( + &mut tower, + &cluster_info, + refresh_bank, + &poh_recorder, + 
Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(), + &my_vote_pubkey, + &my_vote_keypair, + &mut voted_signatures, + has_new_vote_been_rooted, + &mut last_vote_refresh_time, + ); + + // No new votes have been submitted to gossip + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert!(votes.is_empty()); + // Tower's latest vote tx blockhash hasn't changed either + assert_eq!(tower.last_vote_tx_blockhash(), bank0.last_blockhash()); + assert_eq!(tower.last_voted_slot().unwrap(), 0); + } + + // Simulate submitting a new vote for bank 1 to the network, but the vote + // not landing + tower.record_bank_vote(&bank1, &my_vote_pubkey); + ReplayStage::push_vote( + &cluster_info, + &bank1, + &poh_recorder, + &my_vote_pubkey, + &my_vote_keypair, + &mut tower, + &SwitchForkDecision::SameFork, + &mut voted_signatures, + has_new_vote_been_rooted, + ); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes.len(), 1); + let vote_tx = &votes[0]; + assert_eq!(vote_tx.message.recent_blockhash, bank1.last_blockhash()); + assert_eq!(tower.last_vote_tx_blockhash(), bank1.last_blockhash()); + assert_eq!(tower.last_voted_slot().unwrap(), 1); + + // Trying to refresh the vote for bank 1 in bank 2 won't succeed because + // the last vote has not expired yet + ReplayStage::refresh_last_vote( + &mut tower, + &cluster_info, + &bank2, + &poh_recorder, + Tower::last_voted_slot_in_bank(&bank2, &my_vote_pubkey).unwrap(), + &my_vote_pubkey, + &my_vote_keypair, + &mut voted_signatures, + has_new_vote_been_rooted, + &mut last_vote_refresh_time, + ); + // No new votes have been submitted to gossip + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert!(votes.is_empty()); + assert_eq!(tower.last_vote_tx_blockhash(), bank1.last_blockhash()); + assert_eq!(tower.last_voted_slot().unwrap(), 1); + + // Create a bank where the last vote transaction will have expired + let expired_bank = Arc::new(Bank::new_from_parent( + &bank2, + 
&Pubkey::default(), + bank2.slot() + MAX_PROCESSING_AGE as Slot, + )); + fill_bank_with_ticks(&expired_bank); + expired_bank.freeze(); + + // Now trying to refresh the vote for slot 1 will succeed because the recent blockhash + // of the last vote transaction has expired + last_vote_refresh_time.last_refresh_time = last_vote_refresh_time + .last_refresh_time + .checked_sub(Duration::from_millis( + MAX_VOTE_REFRESH_INTERVAL_MILLIS as u64 + 1, + )) + .unwrap(); + let clone_refresh_time = last_vote_refresh_time.last_refresh_time; + ReplayStage::refresh_last_vote( + &mut tower, + &cluster_info, + &expired_bank, + &poh_recorder, + Tower::last_voted_slot_in_bank(&expired_bank, &my_vote_pubkey).unwrap(), + &my_vote_pubkey, + &my_vote_keypair, + &mut voted_signatures, + has_new_vote_been_rooted, + &mut last_vote_refresh_time, + ); + assert!(last_vote_refresh_time.last_refresh_time > clone_refresh_time); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert_eq!(votes.len(), 1); + let vote_tx = &votes[0]; + assert_eq!( + vote_tx.message.recent_blockhash, + expired_bank.last_blockhash() + ); + assert_eq!( + tower.last_vote_tx_blockhash(), + expired_bank.last_blockhash() + ); + assert_eq!(tower.last_voted_slot().unwrap(), 1); + + // Processing the vote transaction should be valid + let expired_bank_child = Arc::new(Bank::new_from_parent( + &expired_bank, + &Pubkey::default(), + expired_bank.slot() + 1, + )); + expired_bank_child.process_transaction(vote_tx).unwrap(); + let (_stake, vote_account) = expired_bank_child + .get_vote_account(&my_vote_pubkey) + .unwrap(); + assert_eq!( + vote_account.vote_state().as_ref().unwrap().tower(), + vec![0, 1] + ); + fill_bank_with_ticks(&expired_bank_child); + expired_bank_child.freeze(); + + // Trying to refresh the vote on a sibling bank where: + // 1) The vote for slot 1 hasn't landed + // 2) The latest refresh vote transaction's recent blockhash (the sibling's hash) doesn't exist + // This will still not refresh because 
`MAX_VOTE_REFRESH_INTERVAL_MILLIS` has not expired yet + let expired_bank_sibling = Arc::new(Bank::new_from_parent( + &bank2, + &Pubkey::default(), + expired_bank_child.slot() + 1, + )); + fill_bank_with_ticks(&expired_bank_sibling); + expired_bank_sibling.freeze(); + // Set the last refresh to now, shouldn't refresh because the last refresh just happened. + last_vote_refresh_time.last_refresh_time = Instant::now(); + ReplayStage::refresh_last_vote( + &mut tower, + &cluster_info, + &expired_bank_sibling, + &poh_recorder, + Tower::last_voted_slot_in_bank(&expired_bank_sibling, &my_vote_pubkey).unwrap(), + &my_vote_pubkey, + &my_vote_keypair, + &mut voted_signatures, + has_new_vote_been_rooted, + &mut last_vote_refresh_time, + ); + let (_, votes) = cluster_info.get_votes(&mut cursor); + assert!(votes.is_empty()); + assert_eq!( + vote_tx.message.recent_blockhash, + expired_bank.last_blockhash() + ); + assert_eq!( + tower.last_vote_tx_blockhash(), + expired_bank.last_blockhash() + ); + assert_eq!(tower.last_voted_slot().unwrap(), 1); + } + + fn run_compute_and_select_forks( + bank_forks: &RwLock, + progress: &mut ProgressMap, + tower: &mut Tower, + heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + ) -> (Option, Option) { + let mut frozen_banks: Vec<_> = bank_forks + .read() + .unwrap() + .frozen_banks() + .values() + .cloned() + .collect(); + let ancestors = &bank_forks.read().unwrap().ancestors(); + let descendants = &bank_forks.read().unwrap().descendants().clone(); + ReplayStage::compute_bank_stats( + &Pubkey::default(), + &bank_forks.read().unwrap().ancestors(), + &mut frozen_banks, + tower, + progress, + &VoteTracker::default(), + &ClusterSlots::default(), + &bank_forks, + heaviest_subtree_fork_choice, + latest_validator_votes_for_frozen_banks, + ); + let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice + .select_forks(&frozen_banks, &tower, 
&progress, &ancestors, bank_forks); + assert!(heaviest_bank_on_same_fork.is_none()); + let SelectVoteAndResetForkResult { + vote_bank, + reset_bank, + .. + } = ReplayStage::select_vote_and_reset_forks( + &heaviest_bank, + heaviest_bank_on_same_fork.as_ref(), + &ancestors, + &descendants, + progress, + tower, + ); + ( + vote_bank.map(|(b, _)| b.slot()), + reset_bank.map(|b| b.slot()), + ) + } + + fn setup_forks() -> VoteSimulator { /* Build fork structure: + slot 0 | slot 1 @@ -3915,7 +5105,7 @@ pub(crate) mod tests { let mut vote_simulator = VoteSimulator::new(1); vote_simulator.fill_bank_forks(forks, &HashMap::new()); - (vote_simulator.bank_forks, vote_simulator.progress) + vote_simulator } fn check_map_eq( diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 0153a45214..2c2833cee2 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -16,7 +16,7 @@ use crate::{ rpc_subscriptions::RpcSubscriptions, window_service::{should_retransmit_and_persist, WindowService}, }; -use crossbeam_channel::Receiver; +use crossbeam_channel::{Receiver, Sender}; use lru::LruCache; use solana_client::rpc_response::SlotUpdate; use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats}; @@ -28,9 +28,7 @@ use solana_measure::measure::Measure; use solana_metrics::inc_new_counter_error; use solana_perf::packet::{Packet, Packets}; use solana_runtime::{bank::Bank, bank_forks::BankForks}; -use solana_sdk::{ - clock::Slot, epoch_schedule::EpochSchedule, feature_set, pubkey::Pubkey, timing::timestamp, -}; +use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp}; use solana_streamer::streamer::PacketReceiver; use std::{ cmp, @@ -66,6 +64,7 @@ struct RetransmitStats { retransmit_total: AtomicU64, last_ts: AtomicU64, compute_turbine_peers_total: AtomicU64, + retransmit_tree_mismatch: AtomicU64, packets_by_slot: Mutex>, packets_by_source: Mutex>, } @@ -84,6 +83,7 @@ fn update_retransmit_stats( 
packets_by_source: HashMap, epoch_fetch: u64, epoch_cach_update: u64, + retransmit_tree_mismatch: u64, ) { stats.total_time.fetch_add(total_time, Ordering::Relaxed); stats @@ -106,6 +106,9 @@ fn update_retransmit_stats( stats .epoch_cache_update .fetch_add(epoch_cach_update, Ordering::Relaxed); + stats + .retransmit_tree_mismatch + .fetch_add(retransmit_tree_mismatch, Ordering::Relaxed); { let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap(); for (slot, count) in packets_by_slot { @@ -158,6 +161,11 @@ fn update_retransmit_stats( stats.retransmit_total.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "retransmit_tree_mismatch", + stats.retransmit_tree_mismatch.swap(0, Ordering::Relaxed) as i64, + i64 + ), ( "compute_turbine", stats.compute_turbine_peers_total.swap(0, Ordering::Relaxed) as i64, @@ -175,7 +183,7 @@ fn update_retransmit_stats( ), ); let mut packets_by_slot = stats.packets_by_slot.lock().unwrap(); - let old_packets_by_slot = std::mem::replace(&mut *packets_by_slot, BTreeMap::new()); + let old_packets_by_slot = std::mem::take(&mut *packets_by_slot); drop(packets_by_slot); for (slot, num_shreds) in old_packets_by_slot { @@ -273,22 +281,6 @@ fn check_if_first_shred_received( } } -// Returns true if turbine retransmit peers patch (#14565) is enabled. -fn enable_turbine_retransmit_peers_patch(shred_slot: Slot, root_bank: &Bank) -> bool { - let feature_slot = root_bank - .feature_set - .activated_slot(&feature_set::turbine_retransmit_peers_patch::id()); - match feature_slot { - None => false, - Some(feature_slot) => { - let epoch_schedule = root_bank.epoch_schedule(); - let feature_epoch = epoch_schedule.get_epoch(feature_slot); - let shred_epoch = epoch_schedule.get_epoch(shred_slot); - feature_epoch < shred_epoch - } - } -} - // Drops shred slot leader from retransmit peers. // TODO: decide which bank should be used here. 
fn get_retransmit_peers( @@ -332,15 +324,15 @@ fn retransmit( first_shreds_received: &Mutex>, rpc_subscriptions: &Option>, ) -> Result<()> { - let timer = Duration::new(1, 0); + const RECV_TIMEOUT: Duration = Duration::from_secs(1); let r_lock = r.lock().unwrap(); - let packets = r_lock.recv_timeout(timer)?; + let packets = r_lock.recv_timeout(RECV_TIMEOUT)?; let mut timer_start = Measure::start("retransmit"); let mut total_packets = packets.packets.len(); - let mut packet_v = vec![packets]; + let mut packets = vec![packets]; while let Ok(nq) = r_lock.try_recv() { total_packets += nq.packets.len(); - packet_v.push(nq); + packets.push(nq); if total_packets >= MAX_PACKET_BATCH_SIZE { break; } @@ -385,95 +377,97 @@ fn retransmit( let mut repair_total = 0; let mut retransmit_total = 0; let mut compute_turbine_peers_total = 0; + let mut retransmit_tree_mismatch = 0; let mut packets_by_slot: HashMap = HashMap::new(); let mut packets_by_source: HashMap = HashMap::new(); let mut max_slot = 0; - for mut packets in packet_v { - for packet in packets.packets.iter_mut() { - // skip discarded packets and repair packets - if packet.meta.discard { - total_packets -= 1; - discard_total += 1; - continue; - } - if packet.meta.repair { - total_packets -= 1; - repair_total += 1; - continue; - } - let shred_slot = match check_if_already_received(packet, shreds_received) { - Some(slot) => slot, - None => continue, - }; - max_slot = max_slot.max(shred_slot); - - if let Some(rpc_subscriptions) = rpc_subscriptions { - if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) { - rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived { - slot: shred_slot, - timestamp: timestamp(), - }); - } + for packet in packets.iter().flat_map(|p| p.packets.iter()) { + // skip discarded packets and repair packets + if packet.meta.discard { + total_packets -= 1; + discard_total += 1; + continue; + } + if packet.meta.repair { + total_packets -= 1; + repair_total += 1; + 
continue; + } + let shred_slot = match check_if_already_received(packet, shreds_received) { + Some(slot) => slot, + None => continue, + }; + max_slot = max_slot.max(shred_slot); + + if let Some(rpc_subscriptions) = rpc_subscriptions { + if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) { + rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived { + slot: shred_slot, + timestamp: timestamp(), + }); } + } - let mut compute_turbine_peers = Measure::start("turbine_start"); - let stakes_and_index = get_retransmit_peers( - my_id, - shred_slot, - leader_schedule_cache, - r_bank.deref(), - r_epoch_stakes_cache.deref(), - ); - let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index( - &my_id, - &r_epoch_stakes_cache.peers, - &stakes_and_index, - packet.meta.seed, - ); - peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len()); - // Until the patch is activated, do the old buggy thing. - if !enable_turbine_retransmit_peers_patch(shred_slot, root_bank.deref()) { - shuffled_stakes_and_index.remove(my_index); - } - // split off the indexes, we don't need the stakes anymore - let indexes: Vec<_> = shuffled_stakes_and_index - .into_iter() - .map(|(_, index)| index) - .collect(); - - let (neighbors, children) = - compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, &indexes); - let neighbors: Vec<_> = neighbors - .into_iter() - .filter_map(|index| { - let peer = &r_epoch_stakes_cache.peers[index]; - if peer.id == my_id { - None - } else { - Some(peer) - } - }) - .collect(); - let children: Vec<_> = children - .into_iter() - .map(|index| &r_epoch_stakes_cache.peers[index]) - .collect(); - compute_turbine_peers.stop(); - compute_turbine_peers_total += compute_turbine_peers.as_us(); - - *packets_by_slot.entry(packet.meta.slot).or_insert(0) += 1; - *packets_by_source - .entry(packet.meta.addr().to_string()) - .or_insert(0) += 1; - - let mut retransmit_time = Measure::start("retransmit_to"); - if 
!packet.meta.forward { - ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?; - } - ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?; - retransmit_time.stop(); - retransmit_total += retransmit_time.as_us(); + let mut compute_turbine_peers = Measure::start("turbine_start"); + let stakes_and_index = get_retransmit_peers( + my_id, + shred_slot, + leader_schedule_cache, + r_bank.deref(), + r_epoch_stakes_cache.deref(), + ); + let (my_index, shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index( + &my_id, + &r_epoch_stakes_cache.peers, + &stakes_and_index, + packet.meta.seed, + ); + // If the node is on the critical path (i.e. the first node in each + // neighborhood), then we expect that the packet arrives at tvu socket + // as opposed to tvu-forwards. If this is not the case, then the + // turbine broadcast/retransmit tree is mismatched across nodes. + if packet.meta.forward == (my_index % DATA_PLANE_FANOUT == 0) { + retransmit_tree_mismatch += 1; } + peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len()); + // split off the indexes, we don't need the stakes anymore + let indexes: Vec<_> = shuffled_stakes_and_index + .into_iter() + .map(|(_, index)| index) + .collect(); + debug_assert_eq!(my_id, r_epoch_stakes_cache.peers[indexes[my_index]].id); + + let (neighbors, children) = compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, &indexes); + let neighbors: Vec<_> = neighbors + .into_iter() + .filter_map(|index| { + let peer = &r_epoch_stakes_cache.peers[index]; + if peer.id == my_id { + None + } else { + Some(peer) + } + }) + .collect(); + let children: Vec<_> = children + .into_iter() + .map(|index| &r_epoch_stakes_cache.peers[index]) + .collect(); + compute_turbine_peers.stop(); + compute_turbine_peers_total += compute_turbine_peers.as_us(); + + *packets_by_slot.entry(packet.meta.slot).or_default() += 1; + *packets_by_source + .entry(packet.meta.addr().to_string()) + .or_default() += 1; + + let mut 
retransmit_time = Measure::start("retransmit_to"); + if !packet.meta.forward { + ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?; + } + ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?; + retransmit_time.stop(); + retransmit_total += retransmit_time.as_us(); } max_slots.retransmit.fetch_max(max_slot, Ordering::Relaxed); timer_start.stop(); @@ -497,6 +491,7 @@ fn retransmit( packets_by_source, epoch_fetch.as_us(), epoch_cache_update.as_us(), + retransmit_tree_mismatch, ); Ok(()) @@ -605,6 +600,7 @@ impl RetransmitStage { completed_data_sets_sender: CompletedDataSetsSender, max_slots: &Arc, rpc_subscriptions: Option>, + duplicate_slots_sender: Sender, ) -> Self { let (retransmit_sender, retransmit_receiver) = channel(); @@ -666,6 +662,7 @@ impl RetransmitStage { cluster_slots, verified_vote_receiver, completed_data_sets_sender, + duplicate_slots_sender, ); let mut thread_hdls = t_retransmit; @@ -722,8 +719,10 @@ mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); + let leader_schedule_cache = Arc::new(cached_leader_schedule); let bank_forks = Arc::new(RwLock::new(bank_forks)); diff --git a/core/src/rpc.rs b/core/src/rpc.rs index 172163b0f4..8716b59444 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -13,6 +13,7 @@ use crate::{ use bincode::{config::Options, serialize}; use jsonrpc_core::{types::error, Error, Metadata, Result}; use jsonrpc_derive::rpc; +use serde::{Deserialize, Serialize}; use solana_account_decoder::{ parse_account_data::AccountAdditionalData, parse_token::{ @@ -45,7 +46,7 @@ use solana_metrics::inc_new_counter_info; use solana_perf::packet::PACKET_DATA_SIZE; use solana_runtime::{ accounts::AccountAddressFilter, - accounts_index::{AccountIndex, IndexKey}, + accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey}, bank::Bank, bank_forks::{BankForks, SnapshotConfig}, commitment::{BlockCommitmentArray, BlockCommitmentCache, CommitmentSlots}, @@ -53,7 +54,7 @@ use solana_runtime::{ 
snapshot_utils::get_highest_snapshot_archive_path, }; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES}, commitment_config::{CommitmentConfig, CommitmentLevel}, @@ -91,17 +92,31 @@ use std::{ }, time::Duration, }; -use tokio::runtime; +use tokio::runtime::Runtime; use velas_account_program::{VelasAccountType, ACCOUNT_LEN as VELAS_ACCOUNT_SIZE}; pub const MAX_REQUEST_PAYLOAD_SIZE: usize = 50 * (1 << 10); // 50kB pub const PERFORMANCE_SAMPLES_LIMIT: usize = 720; +// Limit the length of the `epoch_credits` array for each validator in a `get_vote_accounts` +// response +const MAX_RPC_EPOCH_CREDITS_HISTORY: usize = 5; + fn new_response(bank: &Bank, value: T) -> RpcResponse { let context = RpcResponseContext { slot: bank.slot() }; Response { context, value } } +/// Wrapper for rpc return types of methods that provide responses both with and without context. +/// Main purpose of this is to fix methods that lack context information in their return type, +/// without breaking backwards compatibility. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum OptionalContext { + Context(RpcResponse), + NoContext(T), +} + fn is_finalized( block_commitment_cache: &BlockCommitmentCache, bank: &Bank, @@ -114,8 +129,6 @@ fn is_finalized( #[derive(Debug, Default, Clone)] pub struct JsonRpcConfig { - pub enable_validator_exit: bool, - pub enable_set_log_filter: bool, pub enable_rpc_transaction_history: bool, pub enable_cpi_and_log_storage: bool, pub identity_pubkey: Pubkey, @@ -124,9 +137,11 @@ pub struct JsonRpcConfig { pub enable_bigtable_ledger_storage: bool, pub enable_bigtable_ledger_upload: bool, pub max_multiple_accounts: Option, - pub account_indexes: HashSet, + pub account_indexes: AccountSecondaryIndexes, pub rpc_threads: usize, pub rpc_bigtable_timeout: Option, + pub minimal_api: bool, + pub rpc_scan_and_fix_roots: bool, } #[derive(Clone)] @@ -136,12 +151,12 @@ pub struct JsonRpcRequestProcessor { pub blockstore: Arc, config: JsonRpcConfig, snapshot_config: Option, - validator_exit: Arc>>, + validator_exit: Arc>, health: Arc, cluster_info: Arc, genesis_hash: Hash, transaction_sender: Arc>>, - runtime_handle: runtime::Handle, + runtime: Arc, bigtable_ledger_storage: Option, optimistically_confirmed_bank: Arc>, largest_accounts_cache: Arc>, @@ -224,11 +239,11 @@ impl JsonRpcRequestProcessor { bank_forks: Arc>, block_commitment_cache: Arc>, blockstore: Arc, - validator_exit: Arc>>, + validator_exit: Arc>, health: Arc, cluster_info: Arc, genesis_hash: Hash, - runtime: &runtime::Runtime, + runtime: Arc, bigtable_ledger_storage: Option, optimistically_confirmed_bank: Arc>, largest_accounts_cache: Arc>, @@ -249,7 +264,7 @@ impl JsonRpcRequestProcessor { cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), - runtime_handle: runtime.handle().clone(), + runtime, bigtable_ledger_storage, optimistically_confirmed_bank, largest_accounts_cache, @@ -290,7 +305,7 @@ impl JsonRpcRequestProcessor { cluster_info, 
genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), - runtime_handle: runtime::Runtime::new().unwrap().handle().clone(), + runtime: Arc::new(Runtime::new().expect("Runtime")), bigtable_ledger_storage: None, optimistically_confirmed_bank: Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank: bank.clone(), @@ -330,7 +345,7 @@ impl JsonRpcRequestProcessor { for pubkey in pubkeys { let response_account = - get_encoded_account(&bank, &pubkey, encoding.clone(), config.data_slice)?; + get_encoded_account(&bank, &pubkey, encoding, config.data_slice)?; accounts.push(response_account) } Ok(new_response(&bank, accounts)) @@ -350,7 +365,8 @@ impl JsonRpcRequestProcessor { program_id: &Pubkey, config: Option, filters: Vec, - ) -> Result> { + with_context: bool, + ) -> Result>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); @@ -358,16 +374,16 @@ impl JsonRpcRequestProcessor { check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let keyed_accounts = { if let Some(owner) = get_spl_token_owner_filter(program_id, &filters) { - self.get_filtered_spl_token_accounts_by_owner(&bank, &owner, filters) + self.get_filtered_spl_token_accounts_by_owner(&bank, &owner, filters)? } else if let Some(mint) = get_spl_token_mint_filter(program_id, &filters) { - self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters) + self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters)? } else { - self.get_filtered_program_accounts(&bank, program_id, filters) + self.get_filtered_program_accounts(&bank, program_id, filters)? 
} }; let result = if program_id == &spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed { - get_parsed_token_accounts(bank, keyed_accounts.into_iter()).collect() + get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() @@ -375,15 +391,18 @@ impl JsonRpcRequestProcessor { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, - account, - encoding.clone(), + &account, + encoding, None, data_slice_config, ), }) .collect() }; - Ok(result) + Ok(result).map(|result| match with_context { + true => OptionalContext::Context(new_response(&bank, result)), + false => OptionalContext::NoContext(result), + }) } pub fn get_inflation_reward( @@ -447,13 +466,9 @@ impl JsonRpcRequestProcessor { .unwrap_or_default() .into_iter() .filter_map(|reward| match reward.reward_type? { - RewardType::Staking | RewardType::Voting => { - if addresses.contains(&reward.pubkey) { - Some((reward.clone().pubkey, reward)) - } else { - None - } - } + RewardType::Staking | RewardType::Voting => addresses + .contains(&reward.pubkey) + .then(|| (reward.clone().pubkey, reward)), _ => None, }) .collect(); @@ -534,12 +549,16 @@ impl JsonRpcRequestProcessor { let last_valid_slot = bank .get_blockhash_last_valid_slot(&blockhash) .expect("bank blockhash queue should contain blockhash"); + let last_valid_block_height = bank + .get_blockhash_last_valid_block_height(&blockhash) + .expect("bank blockhash queue should contain blockhash"); new_response( &bank, RpcFees { blockhash: blockhash.to_string(), fee_calculator, last_valid_slot, + last_valid_block_height, }, ) } @@ -596,6 +615,10 @@ impl JsonRpcRequestProcessor { self.bank(commitment).slot() } + fn get_block_height(&self, commitment: Option) -> u64 { + self.bank(commitment).block_height() + } + fn get_max_retransmit_slot(&self) -> Slot { self.max_slots.retransmit.load(Ordering::Relaxed) } @@ -608,6 +631,43 @@ impl JsonRpcRequestProcessor { 
self.bank(commitment).collector_id().to_string() } + fn get_slot_leaders( + &self, + commitment: Option, + start_slot: Slot, + limit: usize, + ) -> Result> { + let bank = self.bank(commitment); + + let (mut epoch, mut slot_index) = + bank.epoch_schedule().get_epoch_and_slot_index(start_slot); + + let mut slot_leaders = Vec::with_capacity(limit); + while slot_leaders.len() < limit { + if let Some(leader_schedule) = + self.leader_schedule_cache.get_epoch_leader_schedule(epoch) + { + slot_leaders.extend( + leader_schedule + .get_slot_leaders() + .iter() + .skip(slot_index as usize) + .take(limit.saturating_sub(slot_leaders.len())), + ); + } else { + return Err(Error::invalid_params(format!( + "Invalid slot range: leader schedule for epoch {} is unavailable", + epoch + ))); + } + + epoch += 1; + slot_index = 0; + } + + Ok(slot_leaders) + } + fn minimum_ledger_slot(&self) -> Result { match self.blockstore.slot_meta_iterator(0) { Ok(mut metas) => match metas.next() { @@ -706,9 +766,17 @@ impl JsonRpcRequestProcessor { fn get_vote_accounts( &self, - commitment: Option, + config: Option, ) -> Result { - let bank = self.bank(commitment); + let config = config.unwrap_or_default(); + + let filter_by_vote_pubkey = if let Some(ref vote_pubkey) = config.vote_pubkey { + Some(verify_pubkey(vote_pubkey)?) 
+ } else { + None + }; + + let bank = self.bank(config.commitment); let vote_accounts = bank.vote_accounts(); let epoch_vote_accounts = bank .epoch_vote_accounts(bank.get_epoch_and_slot_index(bank.slot()).0) @@ -719,7 +787,13 @@ impl JsonRpcRequestProcessor { Vec, ) = vote_accounts .iter() - .map(|(pubkey, (activated_stake, account))| { + .filter_map(|(vote_pubkey, (activated_stake, account))| { + if let Some(filter_by_vote_pubkey) = filter_by_vote_pubkey { + if *vote_pubkey != filter_by_vote_pubkey { + return None; + } + } + let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().unwrap_or(&default_vote_state); let last_vote = if let Some(vote) = vote_state.votes.iter().last() { @@ -727,16 +801,28 @@ impl JsonRpcRequestProcessor { } else { 0 }; - RpcVoteAccountInfo { - vote_pubkey: (pubkey).to_string(), + + let epoch_credits = vote_state.epoch_credits(); + let epoch_credits = if epoch_credits.len() > MAX_RPC_EPOCH_CREDITS_HISTORY { + epoch_credits + .iter() + .skip(epoch_credits.len() - MAX_RPC_EPOCH_CREDITS_HISTORY) + .cloned() + .collect() + } else { + epoch_credits.clone() + }; + + Some(RpcVoteAccountInfo { + vote_pubkey: vote_pubkey.to_string(), node_pubkey: vote_state.node_pubkey.to_string(), activated_stake: *activated_stake, commission: vote_state.commission, root_slot: vote_state.root_slot.unwrap_or(0), - epoch_credits: vote_state.epoch_credits().clone(), - epoch_vote_account: epoch_vote_accounts.contains_key(pubkey), + epoch_credits, + epoch_vote_account: epoch_vote_accounts.contains_key(vote_pubkey), last_vote, - } + }) }) .partition(|vote_account_info| { if bank.slot() >= DELINQUENT_VALIDATOR_SLOT_DISTANCE as u64 { @@ -758,25 +844,6 @@ impl JsonRpcRequestProcessor { }) } - pub fn set_log_filter(&self, filter: String) { - if self.config.enable_set_log_filter { - solana_logger::setup_with(&filter); - } - } - - pub fn validator_exit(&self) -> bool { - if self.config.enable_validator_exit { - warn!("validator_exit request..."); - if let 
Some(x) = self.validator_exit.write().unwrap().take() { - x.exit() - } - true - } else { - debug!("validator_exit ignored"); - false - } - } - fn check_blockstore_root( &self, result: &std::result::Result, @@ -866,10 +933,11 @@ impl JsonRpcRequestProcessor { .highest_confirmed_root() { let result = self.blockstore.get_rooted_block(slot, true); + self.check_blockstore_root(&result, slot)?; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_confirmed_block(slot)); self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result.ok().map(|confirmed_block| { @@ -877,7 +945,6 @@ impl JsonRpcRequestProcessor { })); } } - self.check_blockstore_root(&result, slot)?; self.check_slot_cleaned_up(&result, slot)?; return Ok(result.ok().map(|confirmed_block| { confirmed_block.configure(encoding, transaction_details, show_rewards) @@ -892,38 +959,29 @@ impl JsonRpcRequestProcessor { .load(Ordering::SeqCst) { let result = self.blockstore.get_complete_block(slot, true); - return Ok(result.ok().map(|confirmed_block| { + return Ok(result.ok().map(|mut confirmed_block| { + if confirmed_block.block_time.is_none() + || confirmed_block.block_height.is_none() + { + let r_bank_forks = self.bank_forks.read().unwrap(); + let bank = r_bank_forks.get(slot).cloned(); + if let Some(bank) = bank { + if confirmed_block.block_time.is_none() { + confirmed_block.block_time = Some(bank.clock().unix_timestamp); + } + if confirmed_block.block_height.is_none() { + confirmed_block.block_height = Some(bank.block_height()); + } + } + } confirmed_block.configure(encoding, transaction_details, show_rewards) })); } } - } - Err(RpcCustomError::BlockNotAvailable { slot }.into()) - } - - pub fn get_confirmed_block_hash(&self, slot: Slot) -> Result> { - if self.config.enable_rpc_transaction_history - && slot - <= self - .block_commitment_cache - .read() - .unwrap() - 
.highest_confirmed_root() - { - let result = self.blockstore.get_confirmed_block_hash(slot); - if result.is_err() { - if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { - return Ok(self - .runtime_handle - .block_on(bigtable_ledger_storage.get_confirmed_block_hash(slot)) - .ok()); - } - } - self.check_slot_cleaned_up(&result, slot)?; - Ok(result.ok()) } else { - Err(RpcCustomError::BlockNotAvailable { slot }.into()) + return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); } + Err(RpcCustomError::BlockNotAvailable { slot }.into()) } pub fn get_confirmed_blocks( @@ -966,7 +1024,7 @@ impl JsonRpcRequestProcessor { // into unfinalized confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return self - .runtime_handle + .runtime .block_on( bigtable_ledger_storage .get_confirmed_blocks(start_slot, (end_slot - start_slot) as usize + 1), // increment limit by 1 to ensure returned range is inclusive of both start_slot and end_slot @@ -1031,7 +1089,7 @@ impl JsonRpcRequestProcessor { // confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_confirmed_blocks(start_slot, limit)) .unwrap_or_default()); } @@ -1084,7 +1142,7 @@ impl JsonRpcRequestProcessor { if result.is_err() || matches!(result, Ok(None)) { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_confirmed_block(slot)); self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result @@ -1142,6 +1200,10 @@ impl JsonRpcRequestProcessor { .unwrap_or(false); let bank = self.bank(Some(CommitmentConfig::processed())); + if search_transaction_history && !self.config.enable_rpc_transaction_history { + return 
Err(RpcCustomError::TransactionHistoryNotAvailable.into()); + } + for signature in signatures { let status = if let Some(status) = self.get_transaction_status(signature, &bank) { Some(status) @@ -1168,7 +1230,7 @@ impl JsonRpcRequestProcessor { }) .or_else(|| { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { - self.runtime_handle + self.runtime .block_on(bigtable_ledger_storage.get_signature_status(&signature)) .map(Some) .unwrap_or(None) @@ -1270,13 +1332,15 @@ impl JsonRpcRequestProcessor { None => { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_confirmed_transaction(&signature)) .unwrap_or(None) .map(|confirmed| confirmed.encode(encoding))); } } } + } else { + return Err(RpcCustomError::TransactionHistoryNotAvailable.into()); } Ok(None) } @@ -1341,7 +1405,7 @@ impl JsonRpcRequestProcessor { before = results.last().map(|x| x.signature); } - let bigtable_results = self.runtime_handle.block_on( + let bigtable_results = self.runtime.block_on( bigtable_ledger_storage.get_confirmed_signatures_for_address( &address, before.as_ref(), @@ -1379,7 +1443,7 @@ impl JsonRpcRequestProcessor { }) .collect()) } else { - Ok(vec![]) + Err(RpcCustomError::TransactionHistoryNotAvailable.into()) } } @@ -1391,7 +1455,7 @@ impl JsonRpcRequestProcessor { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_slot = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_first_available_block()) .unwrap_or(None) .unwrap_or(slot); @@ -1443,7 +1507,7 @@ impl JsonRpcRequestProcessor { return Ok(RpcStakeActivation { state: StakeActivationState::Inactive, active: 0, - inactive: stake_account.lamports.saturating_sub(rent_exempt_reserve), + inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve), }); } } @@ -1454,7 +1518,7 @@ impl JsonRpcRequestProcessor { .get_account(&stake_history::id()) 
.ok_or_else(Error::internal_error)?; let stake_history = - solana_sdk::account::from_account::(&stake_history_account) + solana_sdk::account::from_account::(&stake_history_account) .ok_or_else(Error::internal_error)?; let (active, activating, deactivating) = delegation.stake_activating_and_deactivating( @@ -1499,7 +1563,7 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token account".to_string(), )); } - let token_account = TokenAccount::unpack(&account.data).map_err(|_| { + let token_account = TokenAccount::unpack(&account.data()).map_err(|_| { Error::invalid_params("Invalid param: not a v2.0 Token account".to_string()) })?; let mint = &Pubkey::from_str(&token_account.mint.to_string()) @@ -1523,7 +1587,7 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token mint".to_string(), )); } - let mint = Mint::unpack(&mint_account.data).map_err(|_| { + let mint = Mint::unpack(&mint_account.data()).map_err(|_| { Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) })?; @@ -1544,10 +1608,10 @@ impl JsonRpcRequestProcessor { )); } let mut token_balances: Vec = self - .get_filtered_spl_token_accounts_by_mint(&bank, &mint, vec![]) + .get_filtered_spl_token_accounts_by_mint(&bank, &mint, vec![])? 
.into_iter() .map(|(address, account)| { - let amount = TokenAccount::unpack(&account.data) + let amount = TokenAccount::unpack(&account.data()) .map(|account| account.amount) .unwrap_or(0); let amount = token_amount_to_ui_amount(amount, decimals); @@ -1592,7 +1656,8 @@ impl JsonRpcRequestProcessor { })); } - let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner(&bank, owner, filters); + let keyed_accounts = + self.get_filtered_spl_token_accounts_by_owner(&bank, owner, filters)?; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { @@ -1602,8 +1667,8 @@ impl JsonRpcRequestProcessor { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, - account, - encoding.clone(), + &account, + encoding, None, data_slice_config, ), @@ -1644,13 +1709,13 @@ impl JsonRpcRequestProcessor { ]; // Optional filter on Mint address, uses mint account index for scan let keyed_accounts = if let Some(mint) = mint { - self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters) + self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters)? } else { // Filter on Token Account state filters.push(RpcFilterType::DataSize( TokenAccount::get_packed_len() as u64 )); - self.get_filtered_program_accounts(&bank, &token_program_id, filters) + self.get_filtered_program_accounts(&bank, &token_program_id, filters)? 
}; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() @@ -1661,8 +1726,8 @@ impl JsonRpcRequestProcessor { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, - account, - encoding.clone(), + &account, + encoding, None, data_slice_config, ), @@ -1678,11 +1743,11 @@ impl JsonRpcRequestProcessor { bank: &Arc, program_id: &Pubkey, filters: Vec, - ) -> Vec<(Pubkey, Account)> { - let filter_closure = |account: &Account| { + ) -> Result> { + let filter_closure = |account: &AccountSharedData| { filters.iter().all(|filter_type| match filter_type { - RpcFilterType::DataSize(size) => account.data.len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data), + RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, + RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), }) }; if self @@ -1690,16 +1755,24 @@ impl JsonRpcRequestProcessor { .account_indexes .contains(&AccountIndex::ProgramId) { - bank.get_filtered_indexed_accounts(&IndexKey::ProgramId(*program_id), |account| { - // The program-id account index checks for Account owner on inclusion. However, due - // to the current AccountsDb implementation, an account may remain in storage as a - // zero-lamport Account::Default() after being wiped and reinitialized in later - // updates. We include the redundant filters here to avoid returning these - // accounts. - account.owner == *program_id && filter_closure(account) - }) + if !self.config.account_indexes.include_key(program_id) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: program_id.to_string(), + } + .into()); + } + Ok( + bank.get_filtered_indexed_accounts(&IndexKey::ProgramId(*program_id), |account| { + // The program-id account index checks for Account owner on inclusion. 
However, due + // to the current AccountsDb implementation, an account may remain in storage as a + // zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later + // updates. We include the redundant filters here to avoid returning these + // accounts. + account.owner == *program_id && filter_closure(account) + }), + ) } else { - bank.get_filtered_program_accounts(program_id, filter_closure) + Ok(bank.get_filtered_program_accounts(program_id, filter_closure)) } } @@ -1709,10 +1782,10 @@ impl JsonRpcRequestProcessor { bank: &Arc, owner_key: &Pubkey, mut filters: Vec, - ) -> Vec<(Pubkey, Account)> { + ) -> Result> { // The by-owner accounts index checks for Token Account state and Owner address on // inclusion. However, due to the current AccountsDb implementation, an account may remain - // in storage as a zero-lamport Account::Default() after being wiped and reinitialized in + // in storage as a zero-lamport AccountSharedData::Default() after being wiped and reinitialized in // later updates. We include the redundant filters here to avoid returning these accounts. 
// // Filter on Token Account state @@ -1731,13 +1804,22 @@ impl JsonRpcRequestProcessor { .account_indexes .contains(&AccountIndex::SplTokenOwner) { - bank.get_filtered_indexed_accounts(&IndexKey::SplTokenOwner(*owner_key), |account| { - account.owner == spl_token_id_v2_0() - && filters.iter().all(|filter_type| match filter_type { - RpcFilterType::DataSize(size) => account.data.len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data), - }) - }) + if !self.config.account_indexes.include_key(owner_key) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: owner_key.to_string(), + } + .into()); + } + Ok(bank.get_filtered_indexed_accounts( + &IndexKey::SplTokenOwner(*owner_key), + |account| { + account.owner == spl_token_id_v2_0() + && filters.iter().all(|filter_type| match filter_type { + RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, + RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + }) + }, + )) } else { self.get_filtered_program_accounts(bank, &spl_token_id_v2_0(), filters) } @@ -1749,10 +1831,10 @@ impl JsonRpcRequestProcessor { bank: &Arc, mint_key: &Pubkey, mut filters: Vec, - ) -> Vec<(Pubkey, Account)> { + ) -> Result> { // The by-mint accounts index checks for Token Account state and Mint address on inclusion. // However, due to the current AccountsDb implementation, an account may remain in storage - // as be zero-lamport Account::Default() after being wiped and reinitialized in later + // as be zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these accounts. 
// // Filter on Token Account state @@ -1770,13 +1852,21 @@ impl JsonRpcRequestProcessor { .account_indexes .contains(&AccountIndex::SplTokenMint) { - bank.get_filtered_indexed_accounts(&IndexKey::SplTokenMint(*mint_key), |account| { - account.owner == spl_token_id_v2_0() - && filters.iter().all(|filter_type| match filter_type { - RpcFilterType::DataSize(size) => account.data.len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data), - }) - }) + if !self.config.account_indexes.include_key(mint_key) { + return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { + index_key: mint_key.to_string(), + } + .into()); + } + Ok( + bank.get_filtered_indexed_accounts(&IndexKey::SplTokenMint(*mint_key), |account| { + account.owner == spl_token_id_v2_0() + && filters.iter().all(|filter_type| match filter_type { + RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, + RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + }) + }), + ) } else { self.get_filtered_program_accounts(bank, &spl_token_id_v2_0(), filters) } @@ -1794,7 +1884,7 @@ impl JsonRpcRequestProcessor { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_block = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_evm_first_available_block()) .unwrap_or(None) .unwrap_or(block); @@ -1828,7 +1918,7 @@ impl JsonRpcRequestProcessor { if receipt.is_none() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_receipt = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_evm_confirmed_receipt(&hash)) .unwrap_or(None); return bigtable_receipt; @@ -1842,7 +1932,7 @@ impl JsonRpcRequestProcessor { if block.is_none() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_block = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_evm_confirmed_full_block(id)) .ok(); @@ -1865,7 +1955,7 @@ impl 
JsonRpcRequestProcessor { if block.is_none() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_block = self - .runtime_handle + .runtime .block_on(bigtable_ledger_storage.get_evm_block_by_hash(hash)) .ok(); @@ -1879,8 +1969,8 @@ impl JsonRpcRequestProcessor { &self, bank: &Arc, storage_key: Pubkey, - ) -> Vec<(Pubkey, Account)> { - let is_target_velas_account = |account: &Account| -> bool { + ) -> Vec<(Pubkey, AccountSharedData)> { + let is_target_velas_account = |account: &AccountSharedData| -> bool { account.owner == velas_account_program::id() && account.data.len() == VELAS_ACCOUNT_SIZE && matches!( @@ -1909,8 +1999,8 @@ impl JsonRpcRequestProcessor { &self, bank: &Arc, owner_key: Pubkey, - ) -> Vec<(Pubkey, Account)> { - let is_target_velas_account_storage = |account: &Account| -> bool { + ) -> Vec<(Pubkey, AccountSharedData)> { + let is_target_velas_account_storage = |account: &AccountSharedData| -> bool { account.owner == velas_account_program::id() && match VelasAccountType::try_from(account.data.as_slice()) { Ok(VelasAccountType::Storage(account_storage)) => { @@ -1941,8 +2031,8 @@ impl JsonRpcRequestProcessor { &self, bank: &Arc, operational_key: Pubkey, - ) -> Vec<(Pubkey, Account)> { - let is_target_velas_account_storage = |account: &Account| -> bool { + ) -> Vec<(Pubkey, AccountSharedData)> { + let is_target_velas_account_storage = |account: &AccountSharedData| -> bool { account.owner == velas_account_program::id() && match VelasAccountType::try_from(account.data.as_slice()) { Ok(VelasAccountType::Storage(account_storage)) => account_storage @@ -1989,13 +2079,13 @@ fn verify_filter(input: &RpcFilterType) -> Result<()> { .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } -fn verify_pubkey(input: String) -> Result { +fn verify_pubkey(input: &str) -> Result { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } -fn verify_hash(input: String) -> Result { +fn 
verify_hash(input: &str) -> Result { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) @@ -2012,11 +2102,11 @@ fn verify_token_account_filter( ) -> Result { match token_account_filter { RpcTokenAccountsFilter::Mint(mint_str) => { - let mint = verify_pubkey(mint_str)?; + let mint = verify_pubkey(&mint_str)?; Ok(TokenAccountsFilter::Mint(mint)) } RpcTokenAccountsFilter::ProgramId(program_id_str) => { - let program_id = verify_pubkey(program_id_str)?; + let program_id = verify_pubkey(&program_id_str)?; Ok(TokenAccountsFilter::ProgramId(program_id)) } } @@ -2065,7 +2155,7 @@ fn get_encoded_account( if account.owner == spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed { response = Some(get_parsed_token_account(bank.clone(), pubkey, account)); } else if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58) - && account.data.len() > 128 + && account.data().len() > 128 { let message = "Encoded binary (base 58) data should be less than 128 bytes, please use Base64 encoding.".to_string(); return Err(error::Error { @@ -2075,7 +2165,7 @@ fn get_encoded_account( }); } else { response = Some(UiAccount::encode( - pubkey, account, encoding, None, data_slice, + pubkey, &account, encoding, None, data_slice, )); } } @@ -2141,9 +2231,9 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> pub(crate) fn get_parsed_token_account( bank: Arc, pubkey: &Pubkey, - account: Account, + account: AccountSharedData, ) -> UiAccount { - let additional_data = get_token_account_mint(&account.data) + let additional_data = get_token_account_mint(&account.data()) .and_then(|mint_pubkey| get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()) .map(|(_, decimals)| AccountAdditionalData { spl_token_decimals: Some(decimals), @@ -2151,7 +2241,7 @@ pub(crate) fn get_parsed_token_account( UiAccount::encode( pubkey, - account, + &account, UiAccountEncoding::JsonParsed, additional_data, None, @@ -2163,11 
+2253,11 @@ pub(crate) fn get_parsed_token_accounts( keyed_accounts: I, ) -> impl Iterator where - I: Iterator, + I: Iterator, { let mut mint_decimals: HashMap = HashMap::new(); keyed_accounts.filter_map(move |(pubkey, account)| { - let additional_data = get_token_account_mint(&account.data).map(|mint_pubkey| { + let additional_data = get_token_account_mint(&account.data()).map(|mint_pubkey| { let spl_token_decimals = mint_decimals.get(&mint_pubkey).cloned().or_else(|| { let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()?; mint_decimals.insert(mint_pubkey, decimals); @@ -2178,7 +2268,7 @@ where let maybe_encoded_account = UiAccount::encode( &pubkey, - account, + &account, UiAccountEncoding::JsonParsed, additional_data, None, @@ -2231,7 +2321,7 @@ fn get_mint_owner_and_decimals(bank: &Arc, mint: &Pubkey) -> Result<(Pubke let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find mint".to_string()) })?; - let decimals = get_mint_decimals(&mint_account.data)?; + let decimals = get_mint_decimals(&mint_account.data())?; Ok((mint_account.owner, decimals)) } } @@ -2244,1391 +2334,1586 @@ fn get_mint_decimals(data: &[u8]) -> Result { .map(|mint| mint.decimals) } -#[rpc] -pub trait RpcSol { - type Metadata; +fn _send_transaction( + meta: JsonRpcRequestProcessor, + transaction: Transaction, + wire_transaction: Vec, + last_valid_slot: Slot, + durable_nonce_info: Option<(Pubkey, Hash)>, +) -> Result { + if transaction.signatures.is_empty() { + return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); + } + let signature = transaction.signatures[0]; + let transaction_info = TransactionInfo::new( + signature, + wire_transaction, + last_valid_slot, + durable_nonce_info, + ); + meta.transaction_sender + .lock() + .unwrap() + .send(transaction_info) + .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); - // DEPRECATED - #[rpc(meta, name = "confirmTransaction")] - fn 
confirm_transaction( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>; + Ok(signature.to_string()) +} - // DEPRECATED - #[rpc(meta, name = "getSignatureStatus")] - fn get_signature_status( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>>; +// Minimal RPC interface that trusted validators are expected to provide +pub mod rpc_minimal { + use super::*; + #[rpc] + pub trait Minimal { + type Metadata; + + #[rpc(meta, name = "getBalance")] + fn get_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getEpochInfo")] + fn get_epoch_info( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getHealth")] + fn get_health(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getIdentity")] + fn get_identity(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getSlot")] + fn get_slot( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getBlockHeight")] + fn get_block_height( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getSnapshotSlot")] + fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getTransactionCount")] + fn get_transaction_count( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getVersion")] + fn get_version(&self, meta: Self::Metadata) -> Result; + + // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + #[rpc(meta, name = "getVoteAccounts")] + fn get_vote_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result; + + // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + #[rpc(meta, name = 
"getLeaderSchedule")] + fn get_leader_schedule( + &self, + meta: Self::Metadata, + options: Option, + config: Option, + ) -> Result>; + } + + pub struct MinimalImpl; + impl Minimal for MinimalImpl { + type Metadata = JsonRpcRequestProcessor; + + fn get_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result> { + debug!("get_balance rpc request received: {:?}", pubkey_str); + let pubkey = verify_pubkey(&pubkey_str)?; + Ok(meta.get_balance(&pubkey, commitment)) + } - // DEPRECATED (used by Trust Wallet) - #[rpc(meta, name = "getSignatureConfirmation")] - fn get_signature_confirmation( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>; + fn get_epoch_info( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_epoch_info rpc request received"); + let bank = meta.bank(commitment); + Ok(bank.get_epoch_info()) + } - #[rpc(meta, name = "getAccountInfo")] - fn get_account_info( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result>>; + fn get_health(&self, meta: Self::Metadata) -> Result { + match meta.health.check() { + RpcHealthStatus::Ok => Ok("ok".to_string()), + RpcHealthStatus::Unknown => Err(RpcCustomError::NodeUnhealthy { + num_slots_behind: None, + } + .into()), + RpcHealthStatus::Behind { num_slots } => Err(RpcCustomError::NodeUnhealthy { + num_slots_behind: Some(num_slots), + } + .into()), + } + } - #[rpc(meta, name = "getMultipleAccounts")] - fn get_multiple_accounts( - &self, - meta: Self::Metadata, - pubkey_strs: Vec, - config: Option, - ) -> Result>>>; + fn get_identity(&self, meta: Self::Metadata) -> Result { + debug!("get_identity rpc request received"); + Ok(RpcIdentity { + identity: meta.config.identity_pubkey.to_string(), + }) + } - #[rpc(meta, name = "getProgramAccounts")] - fn get_program_accounts( - &self, - meta: Self::Metadata, - program_id_str: String, - config: Option, - ) -> Result>; + fn 
get_slot( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_slot rpc request received"); + Ok(meta.get_slot(commitment)) + } - #[rpc(meta, name = "getMinimumBalanceForRentExemption")] - fn get_minimum_balance_for_rent_exemption( - &self, - meta: Self::Metadata, - data_len: usize, - commitment: Option, - ) -> Result; + fn get_block_height( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_block_height rpc request received"); + Ok(meta.get_block_height(commitment)) + } - #[rpc(meta, name = "getInflationGovernor")] - fn get_inflation_governor( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; + fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result { + debug!("get_snapshot_slot rpc request received"); - #[rpc(meta, name = "getInflationRate")] - fn get_inflation_rate(&self, meta: Self::Metadata) -> Result; + meta.snapshot_config + .and_then(|snapshot_config| { + get_highest_snapshot_archive_path(&snapshot_config.snapshot_package_output_path) + .map(|(_, (slot, _, _))| slot) + }) + .ok_or_else(|| RpcCustomError::NoSnapshot.into()) + } - #[rpc(meta, name = "getEpochSchedule")] - fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result; + fn get_transaction_count( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_transaction_count rpc request received"); + Ok(meta.get_transaction_count(commitment)) + } - #[rpc(meta, name = "getBalance")] - fn get_balance( - &self, - meta: Self::Metadata, - pubkey_str: String, - commitment: Option, - ) -> Result>; + fn get_version(&self, _: Self::Metadata) -> Result { + debug!("get_version rpc request received"); + let version = solana_version::Version::default(); + Ok(RpcVersionInfo { + solana_core: version.to_string(), + feature_set: Some(version.feature_set), + }) + } - #[rpc(meta, name = "getClusterNodes")] - fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result>; + // TODO: Refactor 
`solana-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + fn get_vote_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result { + debug!("get_vote_accounts rpc request received"); + meta.get_vote_accounts(config) + } - #[rpc(meta, name = "getRecentPerformanceSamples")] - fn get_recent_performance_samples( - &self, - meta: Self::Metadata, - limit: Option, - ) -> Result>; + // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so + // it can be removed from rpc_minimal + fn get_leader_schedule( + &self, + meta: Self::Metadata, + options: Option, + config: Option, + ) -> Result> { + let (slot, maybe_config) = options.map(|options| options.unzip()).unwrap_or_default(); + let config = maybe_config.or(config).unwrap_or_default(); + + if let Some(ref identity) = config.identity { + let _ = verify_pubkey(identity)?; + } - #[rpc(meta, name = "getEpochInfo")] - fn get_epoch_info( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; + let bank = meta.bank(config.commitment); + let slot = slot.unwrap_or_else(|| bank.slot()); + let epoch = bank.epoch_schedule().get_epoch(slot); + + debug!("get_leader_schedule rpc request received: {:?}", slot); + + Ok(meta + .leader_schedule_cache + .get_epoch_leader_schedule(epoch) + .map(|leader_schedule| { + let mut schedule_by_identity = + solana_ledger::leader_schedule_utils::leader_schedule_by_identity( + leader_schedule.get_slot_leaders().iter().enumerate(), + ); + if let Some(identity) = config.identity { + schedule_by_identity.retain(|k, _| *k == identity); + } + schedule_by_identity + })) + } + } +} - #[rpc(meta, name = "getBlockCommitment")] - fn get_block_commitment( - &self, - meta: Self::Metadata, - block: Slot, - ) -> Result>; +// Full RPC interface that an API node is expected to provide +// (rpc_minimal should also be provided by an API node) +pub mod rpc_full { + use super::*; + #[rpc] + pub trait 
Full { + type Metadata; + + // DEPRECATED + #[rpc(meta, name = "confirmTransaction")] + fn confirm_transaction( + &self, + meta: Self::Metadata, + signature_str: String, + commitment: Option, + ) -> Result>; + + // // DEPRECATED + // #[rpc(meta, name = "getSignatureStatus")] + // fn get_signature_status( + // &self, + // meta: Self::Metadata, + // signature_str: String, + // commitment: Option, + // ) -> Result>>; + + // // DEPRECATED (used by Trust Wallet) + // #[rpc(meta, name = "getSignatureConfirmation")] + // fn get_signature_confirmation( + // &self, + // meta: Self::Metadata, + // signature_str: String, + // commitment: Option, + // ) -> Result>; + + #[rpc(meta, name = "getAccountInfo")] + fn get_account_info( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getMultipleAccounts")] + fn get_multiple_accounts( + &self, + meta: Self::Metadata, + pubkey_strs: Vec, + config: Option, + ) -> Result>>>; + + #[rpc(meta, name = "getProgramAccounts")] + fn get_program_accounts( + &self, + meta: Self::Metadata, + program_id_str: String, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getMinimumBalanceForRentExemption")] + fn get_minimum_balance_for_rent_exemption( + &self, + meta: Self::Metadata, + data_len: usize, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getInflationReward")] + fn get_inflation_reward( + &self, + meta: Self::Metadata, + address_strs: Vec, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getInflationGovernor")] + fn get_inflation_governor( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getInflationRate")] + fn get_inflation_rate(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getEpochSchedule")] + fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getClusterNodes")] + fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result>; + + #[rpc(meta, name = 
"getRecentPerformanceSamples")] + fn get_recent_performance_samples( + &self, + meta: Self::Metadata, + limit: Option, + ) -> Result>; + + #[rpc(meta, name = "getBlockCommitment")] + fn get_block_commitment( + &self, + meta: Self::Metadata, + block: Slot, + ) -> Result>; + + #[rpc(meta, name = "getGenesisHash")] + fn get_genesis_hash(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getRecentBlockhash")] + fn get_recent_blockhash( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getFees")] + fn get_fees( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getFeeCalculatorForBlockhash")] + fn get_fee_calculator_for_blockhash( + &self, + meta: Self::Metadata, + blockhash: String, + commitment: Option, + ) -> Result>>; + + #[rpc(meta, name = "getFeeRateGovernor")] + fn get_fee_rate_governor( + &self, + meta: Self::Metadata, + ) -> Result>; + + #[rpc(meta, name = "getSignatureStatuses")] + fn get_signature_statuses( + &self, + meta: Self::Metadata, + signature_strs: Vec, + config: Option, + ) -> Result>>>; + + #[rpc(meta, name = "getMaxRetransmitSlot")] + fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getMaxShredInsertSlot")] + fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result; + + // DEPRECATED + #[rpc(meta, name = "getTotalSupply")] + fn get_total_supply( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getLargestAccounts")] + fn get_largest_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getSupply")] + fn get_supply( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "requestAirdrop")] + fn request_airdrop( + &self, + meta: Self::Metadata, + pubkey_str: String, + lamports: u64, + config: Option, + ) -> Result; + + #[rpc(meta, name = "sendTransaction")] + fn 
send_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result; + + #[rpc(meta, name = "simulateTransaction")] + fn simulate_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "getSlotLeader")] + fn get_slot_leader( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result; + + #[rpc(meta, name = "getSlotLeaders")] + fn get_slot_leaders( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: u64, + ) -> Result>; + + #[rpc(meta, name = "minimumLedgerSlot")] + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "getConfirmedBlock")] + fn get_confirmed_block( + &self, + meta: Self::Metadata, + slot: Slot, + config: Option>, + ) -> Result>; + + #[rpc(meta, name = "getBlockTime")] + fn get_block_time(&self, meta: Self::Metadata, slot: Slot) + -> Result>; + + #[rpc(meta, name = "getConfirmedBlocks")] + fn get_confirmed_blocks( + &self, + meta: Self::Metadata, + start_slot: Slot, + config: Option, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getConfirmedBlocksWithLimit")] + fn get_confirmed_blocks_with_limit( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: usize, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getConfirmedTransaction")] + fn get_confirmed_transaction( + &self, + meta: Self::Metadata, + signature_str: String, + config: Option>, + ) -> Result>; + + // DEPRECATED + #[rpc(meta, name = "getConfirmedSignaturesForAddress")] + fn get_confirmed_signatures_for_address( + &self, + meta: Self::Metadata, + pubkey_str: String, + start_slot: Slot, + end_slot: Slot, + ) -> Result>; + + #[rpc(meta, name = "getConfirmedSignaturesForAddress2")] + fn get_confirmed_signatures_for_address2( + &self, + meta: Self::Metadata, + address: String, + config: Option, + ) -> Result>; + + #[rpc(meta, name = "getFirstAvailableBlock")] + fn get_first_available_block(&self, meta: 
Self::Metadata) -> Result; + + #[rpc(meta, name = "getStakeActivation")] + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result; + + #[rpc(meta, name = "getBlockProduction")] + fn get_block_production( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>; + + // SPL Token-specific RPC endpoints + // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for + // program details + + #[rpc(meta, name = "getTokenAccountBalance")] + fn get_token_account_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getTokenSupply")] + fn get_token_supply( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>; + + #[rpc(meta, name = "getTokenLargestAccounts")] + fn get_token_largest_accounts( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>>; + + #[rpc(meta, name = "getTokenAccountsByOwner")] + fn get_token_accounts_by_owner( + &self, + meta: Self::Metadata, + owner_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getTokenAccountsByDelegate")] + fn get_token_accounts_by_delegate( + &self, + meta: Self::Metadata, + delegate_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>>; + + #[rpc(meta, name = "getVelasAccountsByOperationalKey")] + fn get_velas_accounts_by_operational_key( + &self, + meta: Self::Metadata, + pubkey_str: String, + ) -> Result>>; + + #[rpc(meta, name = "getVelasAccountsByOwnerKey")] + fn get_velas_accounts_by_owner_key( + &self, + meta: Self::Metadata, + pubkey_str: String, + ) -> Result>>; + + // TODO(velas): post-merge rebase from develop + // #[rpc(meta, name = "getVelasRelyingPartiesByOwnerKey")] + // fn get_velas_relying_parties_by_owner_key( + // &self, + // meta: Self::Metadata, + // pubkey_str: 
String, + // ) -> Result>>; + } + + pub struct FullImpl; + impl Full for FullImpl { + type Metadata = JsonRpcRequestProcessor; + + fn confirm_transaction( + &self, + meta: Self::Metadata, + id: String, + commitment: Option, + ) -> Result> { + debug!("confirm_transaction rpc request received: {:?}", id); + let signature = verify_signature(&id)?; + Ok(meta.confirm_transaction(&signature, commitment)) + } - #[rpc(meta, name = "getGenesisHash")] - fn get_genesis_hash(&self, meta: Self::Metadata) -> Result; + fn get_account_info( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result>> { + debug!("get_account_info rpc request received: {:?}", pubkey_str); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_account_info(&pubkey, config) + } - #[rpc(meta, name = "getHealth")] - fn get_health(&self, meta: Self::Metadata) -> Result; + fn get_multiple_accounts( + &self, + meta: Self::Metadata, + pubkey_strs: Vec, + config: Option, + ) -> Result>>> { + debug!( + "get_multiple_accounts rpc request received: {:?}", + pubkey_strs.len() + ); - #[rpc(meta, name = "getLeaderSchedule")] - fn get_leader_schedule( - &self, - meta: Self::Metadata, - slot: Option, - commitment: Option, - ) -> Result>; + let max_multiple_accounts = meta + .config + .max_multiple_accounts + .unwrap_or(MAX_MULTIPLE_ACCOUNTS); + if pubkey_strs.len() > max_multiple_accounts { + return Err(Error::invalid_params(format!( + "Too many inputs provided; max {}", + max_multiple_accounts + ))); + } + let mut pubkeys: Vec = vec![]; + for pubkey_str in pubkey_strs { + pubkeys.push(verify_pubkey(&pubkey_str)?); + } + meta.get_multiple_accounts(pubkeys, config) + } - #[rpc(meta, name = "getRecentBlockhash")] - fn get_recent_blockhash( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result>; + fn get_minimum_balance_for_rent_exemption( + &self, + meta: Self::Metadata, + data_len: usize, + commitment: Option, + ) -> Result { + debug!( + 
"get_minimum_balance_for_rent_exemption rpc request received: {:?}", + data_len + ); + if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH { + return Err(Error::invalid_request()); + } + Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) + } - #[rpc(meta, name = "getFees")] - fn get_fees( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result>; + fn get_program_accounts( + &self, + meta: Self::Metadata, + program_id_str: String, + config: Option, + ) -> Result>> { + debug!( + "get_program_accounts rpc request received: {:?}", + program_id_str + ); + let program_id = verify_pubkey(&program_id_str)?; + let (config, filters, with_context) = if let Some(config) = config { + ( + Some(config.account_config), + config.filters.unwrap_or_default(), + config.with_context.unwrap_or_default(), + ) + } else { + (None, vec![], false) + }; + if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { + return Err(Error::invalid_params(format!( + "Too many filters provided; max {}", + MAX_GET_PROGRAM_ACCOUNT_FILTERS + ))); + } + for filter in &filters { + verify_filter(filter)?; + } + meta.get_program_accounts(&program_id, config, filters, with_context) + } - #[rpc(meta, name = "getFeeCalculatorForBlockhash")] - fn get_fee_calculator_for_blockhash( - &self, - meta: Self::Metadata, - blockhash: String, - commitment: Option, - ) -> Result>>; + fn get_inflation_governor( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_inflation_governor rpc request received"); + Ok(meta.get_inflation_governor(commitment)) + } - #[rpc(meta, name = "getFeeRateGovernor")] - fn get_fee_rate_governor( - &self, - meta: Self::Metadata, - ) -> Result>; + fn get_inflation_rate(&self, meta: Self::Metadata) -> Result { + debug!("get_inflation_rate rpc request received"); + Ok(meta.get_inflation_rate()) + } - #[rpc(meta, name = "getSnapshotSlot")] - fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result; + fn 
get_epoch_schedule(&self, meta: Self::Metadata) -> Result { + debug!("get_epoch_schedule rpc request received"); + Ok(meta.get_epoch_schedule()) + } - #[rpc(meta, name = "getSignatureStatuses")] - fn get_signature_statuses( - &self, - meta: Self::Metadata, - signature_strs: Vec, - config: Option, - ) -> Result>>>; + fn get_recent_performance_samples( + &self, + meta: Self::Metadata, + limit: Option, + ) -> Result> { + debug!("get_recent_performance_samples request received"); - #[rpc(meta, name = "getSlot")] - fn get_slot(&self, meta: Self::Metadata, commitment: Option) -> Result; + let limit = limit.unwrap_or(PERFORMANCE_SAMPLES_LIMIT); - #[rpc(meta, name = "getMaxRetransmitSlot")] - fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result; + if limit > PERFORMANCE_SAMPLES_LIMIT { + return Err(Error::invalid_params(format!( + "Invalid limit; max {}", + PERFORMANCE_SAMPLES_LIMIT + ))); + } - #[rpc(meta, name = "getMaxShredInsertSlot")] - fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result; + Ok(meta + .blockstore + .get_recent_perf_samples(limit) + .map_err(|err| { + warn!("get_recent_performance_samples failed: {:?}", err); + Error::invalid_request() + })? 
+ .iter() + .map(|(slot, sample)| RpcPerfSample { + slot: *slot, + num_transactions: sample.num_transactions, + num_slots: sample.num_slots, + sample_period_secs: sample.sample_period_secs, + }) + .collect()) + } - #[rpc(meta, name = "getTransactionCount")] - fn get_transaction_count( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; + fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result> { + debug!("get_cluster_nodes rpc request received"); + let cluster_info = &meta.cluster_info; + fn valid_address_or_none(addr: &SocketAddr) -> Option { + if ContactInfo::is_valid_address(addr) { + Some(*addr) + } else { + None + } + } + let my_shred_version = cluster_info.my_shred_version(); + Ok(cluster_info + .all_peers() + .iter() + .filter_map(|(contact_info, _)| { + if my_shred_version == contact_info.shred_version + && ContactInfo::is_valid_address(&contact_info.gossip) + { + let (version, feature_set) = if let Some(version) = + cluster_info.get_node_version(&contact_info.id) + { + (Some(version.to_string()), Some(version.feature_set)) + } else { + (None, None) + }; + Some(RpcContactInfo { + pubkey: contact_info.id.to_string(), + gossip: Some(contact_info.gossip), + tpu: valid_address_or_none(&contact_info.tpu), + rpc: valid_address_or_none(&contact_info.rpc), + version, + feature_set, + shred_version: Some(my_shred_version), + }) + } else { + None // Exclude spy nodes + } + }) + .collect()) + } - // DEPRECATED - #[rpc(meta, name = "getTotalSupply")] - fn get_total_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; + fn get_block_commitment( + &self, + meta: Self::Metadata, + block: Slot, + ) -> Result> { + debug!("get_block_commitment rpc request received"); + Ok(meta.get_block_commitment(block)) + } - #[rpc(meta, name = "getLargestAccounts")] - fn get_largest_accounts( - &self, - meta: Self::Metadata, - config: Option, - ) -> Result>>; + fn get_genesis_hash(&self, meta: Self::Metadata) -> Result { + 
debug!("get_genesis_hash rpc request received"); + Ok(meta.genesis_hash.to_string()) + } - #[rpc(meta, name = "getSupply")] - fn get_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "requestAirdrop")] - fn request_airdrop( - &self, - meta: Self::Metadata, - pubkey_str: String, - lamports: u64, - config: Option, - ) -> Result; - - #[rpc(meta, name = "sendTransaction")] - fn send_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result; - - #[rpc(meta, name = "simulateTransaction")] - fn simulate_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result>; - - #[rpc(meta, name = "getSlotLeader")] - fn get_slot_leader( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; - - #[rpc(meta, name = "getSlotLeaders")] - fn get_slot_leaders( - &self, - meta: Self::Metadata, - start_slot: Slot, - end_slot: Slot, - ) -> Result>; - - #[rpc(meta, name = "minimumLedgerSlot")] - fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getVoteAccounts")] - fn get_vote_accounts( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result; - - #[rpc(meta, name = "validatorExit")] - fn validator_exit(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getIdentity")] - fn get_identity(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getVersion")] - fn get_version(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "setLogFilter")] - fn set_log_filter(&self, _meta: Self::Metadata, filter: String) -> Result<()>; - - #[rpc(meta, name = "getConfirmedBlock")] - fn get_confirmed_block( - &self, - meta: Self::Metadata, - slot: Slot, - config: Option>, - ) -> Result>; - - #[rpc(meta, name = "getBlockTime")] - fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result>; - - #[rpc(meta, name = "getConfirmedBlocks")] - fn get_confirmed_blocks( - &self, - meta: 
Self::Metadata, - start_slot: Slot, - config: Option, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getConfirmedBlocksWithLimit")] - fn get_confirmed_blocks_with_limit( - &self, - meta: Self::Metadata, - start_slot: Slot, - limit: usize, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getConfirmedTransaction")] - fn get_confirmed_transaction( - &self, - meta: Self::Metadata, - signature_str: String, - config: Option>, - ) -> Result>; - - // DEPRECATED - #[rpc(meta, name = "getConfirmedSignaturesForAddress")] - fn get_confirmed_signatures_for_address( - &self, - meta: Self::Metadata, - pubkey_str: String, - start_slot: Slot, - end_slot: Slot, - ) -> Result>; - - #[rpc(meta, name = "getConfirmedSignaturesForAddress2")] - fn get_confirmed_signatures_for_address2( - &self, - meta: Self::Metadata, - address: String, - config: Option, - ) -> Result>; - - #[rpc(meta, name = "getFirstAvailableBlock")] - fn get_first_available_block(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "getStakeActivation")] - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result; - - #[rpc(meta, name = "getInflationReward")] - fn get_inflation_reward( - &self, - meta: Self::Metadata, - address_strs: Vec, - config: Option, - ) -> Result>>; - - // SPL Token-specific RPC endpoints - // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for - // program details - - #[rpc(meta, name = "getTokenAccountBalance")] - fn get_token_account_balance( - &self, - meta: Self::Metadata, - pubkey_str: String, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getTokenSupply")] - fn get_token_supply( - &self, - meta: Self::Metadata, - mint_str: String, - commitment: Option, - ) -> Result>; - - #[rpc(meta, name = "getTokenLargestAccounts")] - fn get_token_largest_accounts( - &self, - meta: Self::Metadata, - mint_str: String, - commitment: Option, - ) -> Result>>; - - 
#[rpc(meta, name = "getTokenAccountsByOwner")] - fn get_token_accounts_by_owner( - &self, - meta: Self::Metadata, - owner_str: String, - token_account_filter: RpcTokenAccountsFilter, - config: Option, - ) -> Result>>; - - #[rpc(meta, name = "getTokenAccountsByDelegate")] - fn get_token_accounts_by_delegate( - &self, - meta: Self::Metadata, - delegate_str: String, - token_account_filter: RpcTokenAccountsFilter, - config: Option, - ) -> Result>>; - - #[rpc(meta, name = "getVelasAccountsByOperationalKey")] - fn get_velas_accounts_by_operational_key( - &self, - meta: Self::Metadata, - pubkey_str: String, - ) -> Result>>; - - #[rpc(meta, name = "getVelasAccountsByOwnerKey")] - fn get_velas_accounts_by_owner_key( - &self, - meta: Self::Metadata, - pubkey_str: String, - ) -> Result>>; -} - -fn _send_transaction( - meta: JsonRpcRequestProcessor, - transaction: Transaction, - wire_transaction: Vec, - last_valid_slot: Slot, - durable_nonce_info: Option<(Pubkey, Hash)>, -) -> Result { - if transaction.signatures.is_empty() { - return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); - } - let signature = transaction.signatures[0]; - let transaction_info = TransactionInfo::new( - signature, - wire_transaction, - last_valid_slot, - durable_nonce_info, - ); - meta.transaction_sender - .lock() - .unwrap() - .send(transaction_info) - .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); - - Ok(signature.to_string()) -} - -pub struct RpcSolImpl; -impl RpcSol for RpcSolImpl { - type Metadata = JsonRpcRequestProcessor; - - fn confirm_transaction( - &self, - meta: Self::Metadata, - id: String, - commitment: Option, - ) -> Result> { - debug!("confirm_transaction rpc request received: {:?}", id); - let signature = verify_signature(&id)?; - Ok(meta.confirm_transaction(&signature, commitment)) - } - - fn get_account_info( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result>> { - debug!("get_account_info rpc request 
received: {:?}", pubkey_str); - let pubkey = verify_pubkey(pubkey_str)?; - meta.get_account_info(&pubkey, config) - } - - fn get_multiple_accounts( - &self, - meta: Self::Metadata, - pubkey_strs: Vec, - config: Option, - ) -> Result>>> { - debug!( - "get_multiple_accounts rpc request received: {:?}", - pubkey_strs.len() - ); - - let max_multiple_accounts = meta - .config - .max_multiple_accounts - .unwrap_or(MAX_MULTIPLE_ACCOUNTS); - if pubkey_strs.len() > max_multiple_accounts { - return Err(Error::invalid_params(format!( - "Too many inputs provided; max {}", - max_multiple_accounts - ))); - } - let mut pubkeys: Vec = vec![]; - for pubkey_str in pubkey_strs { - pubkeys.push(verify_pubkey(pubkey_str)?); + fn get_recent_blockhash( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result> { + debug!("get_recent_blockhash rpc request received"); + Ok(meta.get_recent_blockhash(commitment)) } - meta.get_multiple_accounts(pubkeys, config) - } - fn get_minimum_balance_for_rent_exemption( - &self, - meta: Self::Metadata, - data_len: usize, - commitment: Option, - ) -> Result { - debug!( - "get_minimum_balance_for_rent_exemption rpc request received: {:?}", - data_len - ); - if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH { - return Err(Error::invalid_request()); + fn get_fees( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result> { + debug!("get_fees rpc request received"); + Ok(meta.get_fees(commitment)) } - Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) - } - fn get_program_accounts( - &self, - meta: Self::Metadata, - program_id_str: String, - config: Option, - ) -> Result> { - debug!( - "get_program_accounts rpc request received: {:?}", - program_id_str - ); - let program_id = verify_pubkey(program_id_str)?; - let (config, filters) = if let Some(config) = config { - ( - Some(config.account_config), - config.filters.unwrap_or_default(), - ) - } else { - (None, vec![]) - }; - if filters.len() > 
MAX_GET_PROGRAM_ACCOUNT_FILTERS { - return Err(Error::invalid_params(format!( - "Too many filters provided; max {}", - MAX_GET_PROGRAM_ACCOUNT_FILTERS - ))); + fn get_fee_calculator_for_blockhash( + &self, + meta: Self::Metadata, + blockhash: String, + commitment: Option, + ) -> Result>> { + debug!("get_fee_calculator_for_blockhash rpc request received"); + let blockhash = Hash::from_str(&blockhash) + .map_err(|e| Error::invalid_params(format!("{:?}", e)))?; + Ok(meta.get_fee_calculator_for_blockhash(&blockhash, commitment)) } - for filter in &filters { - verify_filter(filter)?; - } - meta.get_program_accounts(&program_id, config, filters) - } - - fn get_inflation_governor( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_inflation_governor rpc request received"); - Ok(meta.get_inflation_governor(commitment)) - } - - fn get_inflation_rate(&self, meta: Self::Metadata) -> Result { - debug!("get_inflation_rate rpc request received"); - Ok(meta.get_inflation_rate()) - } - - fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result { - debug!("get_epoch_schedule rpc request received"); - Ok(meta.get_epoch_schedule()) - } - - fn get_balance( - &self, - meta: Self::Metadata, - pubkey_str: String, - commitment: Option, - ) -> Result> { - debug!("get_balance rpc request received: {:?}", pubkey_str); - let pubkey = verify_pubkey(pubkey_str)?; - Ok(meta.get_balance(&pubkey, commitment)) - } - - fn get_recent_performance_samples( - &self, - meta: Self::Metadata, - limit: Option, - ) -> Result> { - debug!("get_recent_performance_samples request received"); - - let limit = limit.unwrap_or(PERFORMANCE_SAMPLES_LIMIT); - if limit > PERFORMANCE_SAMPLES_LIMIT { - return Err(Error::invalid_params(format!( - "Invalid limit; max {}", - PERFORMANCE_SAMPLES_LIMIT - ))); + fn get_fee_rate_governor( + &self, + meta: Self::Metadata, + ) -> Result> { + debug!("get_fee_rate_governor rpc request received"); + Ok(meta.get_fee_rate_governor()) } - Ok(meta - 
.blockstore - .get_recent_perf_samples(limit) - .map_err(|err| { - warn!("get_recent_performance_samples failed: {:?}", err); - Error::invalid_request() - })? - .iter() - .map(|(slot, sample)| RpcPerfSample { - slot: *slot, - num_transactions: sample.num_transactions, - num_slots: sample.num_slots, - sample_period_secs: sample.sample_period_secs, - }) - .collect()) - } - - fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result> { - debug!("get_cluster_nodes rpc request received"); - let cluster_info = &meta.cluster_info; - fn valid_address_or_none(addr: &SocketAddr) -> Option { - if ContactInfo::is_valid_address(addr) { - Some(*addr) - } else { - None + // fn get_signature_confirmation( + // &self, + // meta: Self::Metadata, + // signature_str: String, + // commitment: Option, + // ) -> Result> { + // debug!( + // "get_signature_confirmation rpc request received: {:?}", + // signature_str + // ); + // let signature = verify_signature(&signature_str)?; + // Ok(meta.get_signature_confirmation_status(signature, commitment)) + // } + + // fn get_signature_status( + // &self, + // meta: Self::Metadata, + // signature_str: String, + // commitment: Option, + // ) -> Result>> { + // debug!( + // "get_signature_status rpc request received: {:?}", + // signature_str + // ); + // let signature = verify_signature(&signature_str)?; + // Ok(meta.get_signature_status(signature, commitment)) + // } + + fn get_signature_statuses( + &self, + meta: Self::Metadata, + signature_strs: Vec, + config: Option, + ) -> Result>>> { + debug!( + "get_signature_statuses rpc request received: {:?}", + signature_strs.len() + ); + if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS { + return Err(Error::invalid_params(format!( + "Too many inputs provided; max {}", + MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS + ))); } - } - let my_shred_version = cluster_info.my_shred_version(); - Ok(cluster_info - .all_peers() - .iter() - .filter_map(|(contact_info, _)| { - if my_shred_version == 
contact_info.shred_version - && ContactInfo::is_valid_address(&contact_info.gossip) - { - let (version, feature_set) = - if let Some(version) = cluster_info.get_node_version(&contact_info.id) { - (Some(version.to_string()), Some(version.feature_set)) - } else { - (None, None) - }; - Some(RpcContactInfo { - pubkey: contact_info.id.to_string(), - gossip: Some(contact_info.gossip), - tpu: valid_address_or_none(&contact_info.tpu), - rpc: valid_address_or_none(&contact_info.rpc), - version, - feature_set, - }) - } else { - None // Exclude spy nodes - } - }) - .collect()) - } - - fn get_epoch_info( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_epoch_info rpc request received"); - let bank = meta.bank(commitment); - Ok(bank.get_epoch_info()) - } - - fn get_block_commitment( - &self, - meta: Self::Metadata, - block: Slot, - ) -> Result> { - debug!("get_block_commitment rpc request received"); - Ok(meta.get_block_commitment(block)) - } - - fn get_genesis_hash(&self, meta: Self::Metadata) -> Result { - debug!("get_genesis_hash rpc request received"); - Ok(meta.genesis_hash.to_string()) - } - - fn get_health(&self, meta: Self::Metadata) -> Result { - match meta.health.check() { - RpcHealthStatus::Ok => Ok("ok".to_string()), - RpcHealthStatus::Behind { num_slots } => Err(RpcCustomError::NodeUnhealthy { - num_slots_behind: Some(num_slots), + let mut signatures: Vec = vec![]; + for signature_str in signature_strs { + signatures.push(verify_signature(&signature_str)?); } - .into()), + meta.get_signature_statuses(signatures, config) } - } - - fn get_leader_schedule( - &self, - meta: Self::Metadata, - slot: Option, - commitment: Option, - ) -> Result> { - let bank = meta.bank(commitment); - let slot = slot.unwrap_or_else(|| bank.slot()); - let epoch = bank.epoch_schedule().get_epoch(slot); - - debug!("get_leader_schedule rpc request received: {:?}", slot); - Ok( - solana_ledger::leader_schedule_utils::leader_schedule(epoch, &bank).map( - 
|leader_schedule| { - let mut leader_schedule_by_identity = HashMap::new(); + fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result { + debug!("get_max_retransmit_slot rpc request received"); + Ok(meta.get_max_retransmit_slot()) + } - for (slot_index, identity_pubkey) in - leader_schedule.get_slot_leaders().iter().enumerate() - { - leader_schedule_by_identity - .entry(identity_pubkey) - .or_insert_with(Vec::new) - .push(slot_index); - } + fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result { + debug!("get_max_shred_insert_slot rpc request received"); + Ok(meta.get_max_shred_insert_slot()) + } - leader_schedule_by_identity - .into_iter() - .map(|(identity_pubkey, slot_indices)| { - (identity_pubkey.to_string(), slot_indices) - }) - .collect() - }, - ), - ) - } + fn get_total_supply( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_total_supply rpc request received"); + Ok(meta.get_total_supply(commitment)) + } - fn get_recent_blockhash( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result> { - debug!("get_recent_blockhash rpc request received"); - Ok(meta.get_recent_blockhash(commitment)) - } + fn get_largest_accounts( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result>> { + debug!("get_largest_accounts rpc request received"); + Ok(meta.get_largest_accounts(config)) + } - fn get_fees( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result> { - debug!("get_fees rpc request received"); - Ok(meta.get_fees(commitment)) - } + fn get_supply( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result> { + debug!("get_supply rpc request received"); + Ok(meta.get_supply(commitment)) + } - fn get_fee_calculator_for_blockhash( - &self, - meta: Self::Metadata, - blockhash: String, - commitment: Option, - ) -> Result>> { - debug!("get_fee_calculator_for_blockhash rpc request received"); - let blockhash = - Hash::from_str(&blockhash).map_err(|e| 
Error::invalid_params(format!("{:?}", e)))?; - Ok(meta.get_fee_calculator_for_blockhash(&blockhash, commitment)) - } + fn request_airdrop( + &self, + meta: Self::Metadata, + pubkey_str: String, + lamports: u64, + config: Option, + ) -> Result { + debug!("request_airdrop rpc request received"); + trace!( + "request_airdrop id={} lamports={} config: {:?}", + pubkey_str, + lamports, + &config + ); - fn get_fee_rate_governor( - &self, - meta: Self::Metadata, - ) -> Result> { - debug!("get_fee_rate_governor rpc request received"); - Ok(meta.get_fee_rate_governor()) - } + let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?; + let pubkey = verify_pubkey(&pubkey_str)?; - fn get_signature_confirmation( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result> { - debug!( - "get_signature_confirmation rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str)?; - Ok(meta.get_signature_confirmation_status(signature, commitment)) - } + let config = config.unwrap_or_default(); + let bank = meta.bank(config.commitment); - fn get_signature_status( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>> { - debug!( - "get_signature_status rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str)?; - Ok(meta.get_signature_status(signature, commitment)) - } + let blockhash = if let Some(blockhash) = config.recent_blockhash { + verify_hash(&blockhash)? 
+ } else { + bank.confirmed_last_blockhash().0 + }; + let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap_or(0); - fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result { - debug!("get_snapshot_slot rpc request received"); + let transaction = + request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err( + |err| { + info!("request_airdrop_transaction failed: {:?}", err); + Error::internal_error() + }, + )?; - meta.snapshot_config - .and_then(|snapshot_config| { - get_highest_snapshot_archive_path(&snapshot_config.snapshot_package_output_path) - .map(|(_, (slot, _, _))| slot) - }) - .ok_or_else(|| RpcCustomError::NoSnapshot.into()) - } + let wire_transaction = serialize(&transaction).map_err(|err| { + info!("request_airdrop: serialize error: {:?}", err); + Error::internal_error() + })?; - fn get_signature_statuses( - &self, - meta: Self::Metadata, - signature_strs: Vec, - config: Option, - ) -> Result>>> { - debug!( - "get_signature_statuses rpc request received: {:?}", - signature_strs.len() - ); - if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS { - return Err(Error::invalid_params(format!( - "Too many inputs provided; max {}", - MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS - ))); - } - let mut signatures: Vec = vec![]; - for signature_str in signature_strs { - signatures.push(verify_signature(&signature_str)?); + _send_transaction(meta, transaction, wire_transaction, last_valid_slot, None) } - meta.get_signature_statuses(signatures, config) - } - - fn get_slot(&self, meta: Self::Metadata, commitment: Option) -> Result { - debug!("get_slot rpc request received"); - Ok(meta.get_slot(commitment)) - } - fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result { - debug!("get_max_retransmit_slot rpc request received"); - Ok(meta.get_max_retransmit_slot()) - } - - fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result { - debug!("get_max_shred_insert_slot rpc request received"); - 
Ok(meta.get_max_shred_insert_slot()) - } - - fn get_transaction_count( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_transaction_count rpc request received"); - Ok(meta.get_transaction_count(commitment)) - } - - fn get_total_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_total_supply rpc request received"); - Ok(meta.get_total_supply(commitment)) - } + fn send_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result { + debug!("send_transaction rpc request received"); + let config = config.unwrap_or_default(); + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); + let (wire_transaction, transaction) = deserialize_transaction(data, encoding)?; + + let preflight_commitment = config + .preflight_commitment + .map(|commitment| CommitmentConfig { commitment }); + let preflight_bank = &*meta.bank(preflight_commitment); + + let mut last_valid_slot = preflight_bank + .get_blockhash_last_valid_slot(&transaction.message.recent_blockhash) + .unwrap_or(0); + + let durable_nonce_info = solana_sdk::transaction::uses_durable_nonce(&transaction) + .and_then(|nonce_ix| { + solana_sdk::transaction::get_nonce_pubkey_from_instruction( + &nonce_ix, + &transaction, + ) + }) + .map(|&pubkey| (pubkey, transaction.message.recent_blockhash)); + if durable_nonce_info.is_some() { + // While it uses a defined constant, this last_valid_slot value is chosen arbitrarily. + // It provides a fallback timeout for durable-nonce transaction retries in case of + // malicious packing of the retry queue. Durable-nonce transactions are otherwise + // retried until the nonce is advanced. 
+ last_valid_slot = preflight_bank.slot() + MAX_RECENT_BLOCKHASHES as u64; + } - fn get_largest_accounts( - &self, - meta: Self::Metadata, - config: Option, - ) -> Result>> { - debug!("get_largest_accounts rpc request received"); - Ok(meta.get_largest_accounts(config)) - } + if !config.skip_preflight { + if let Err(e) = verify_transaction(&transaction) { + return Err(e); + } - fn get_supply( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result> { - debug!("get_supply rpc request received"); - Ok(meta.get_supply(commitment)) - } + match meta.health.check() { + RpcHealthStatus::Ok => (), + RpcHealthStatus::Unknown => { + return Err(RpcCustomError::NodeUnhealthy { + num_slots_behind: None, + } + .into()); + } + RpcHealthStatus::Behind { num_slots } => { + return Err(RpcCustomError::NodeUnhealthy { + num_slots_behind: Some(num_slots), + } + .into()); + } + } - fn request_airdrop( - &self, - meta: Self::Metadata, - pubkey_str: String, - lamports: u64, - config: Option, - ) -> Result { - debug!("request_airdrop rpc request received"); - trace!( - "request_airdrop id={} lamports={} config: {:?}", - pubkey_str, - lamports, - &config - ); - - let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?; - let pubkey = verify_pubkey(pubkey_str)?; + if let (Err(err), logs, _) = preflight_bank.simulate_transaction(&transaction) { + return Err(RpcCustomError::SendTransactionPreflightFailure { + message: format!("Transaction simulation failed: {}", err), + result: RpcSimulateTransactionResult { + err: Some(err), + logs: Some(logs), + accounts: None, + }, + } + .into()); + } + } - let config = config.unwrap_or_default(); - let bank = meta.bank(config.commitment); + _send_transaction( + meta, + transaction, + wire_transaction, + last_valid_slot, + durable_nonce_info, + ) + } - let blockhash = if let Some(blockhash) = config.recent_blockhash { - verify_hash(blockhash)? 
- } else { - bank.confirmed_last_blockhash().0 - }; - let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap_or(0); + fn simulate_transaction( + &self, + meta: Self::Metadata, + data: String, + config: Option, + ) -> Result> { + debug!("simulate_transaction rpc request received"); + let config = config.unwrap_or_default(); + let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); + let (_, mut transaction) = deserialize_transaction(data, encoding)?; + + if config.sig_verify { + if config.replace_recent_blockhash { + return Err(Error::invalid_params( + "sigVerify may not be used with replaceRecentBlockhash", + )); + } - let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash) - .map_err(|err| { - info!("request_airdrop_transaction failed: {:?}", err); - Error::internal_error() - })?; + if let Err(e) = verify_transaction(&transaction) { + return Err(e); + } + } + let bank = &*meta.bank(config.commitment); + if config.replace_recent_blockhash { + transaction.message.recent_blockhash = bank.last_blockhash(); + } + let (result, logs, post_simulation_accounts) = bank.simulate_transaction(&transaction); - let wire_transaction = serialize(&transaction).map_err(|err| { - info!("request_airdrop: serialize error: {:?}", err); - Error::internal_error() - })?; + let accounts = if let Some(config_accounts) = config.accounts { + let accounts_encoding = config_accounts + .encoding + .unwrap_or(UiAccountEncoding::Base64); - _send_transaction(meta, transaction, wire_transaction, last_valid_slot, None) - } + if accounts_encoding == UiAccountEncoding::Binary + || accounts_encoding == UiAccountEncoding::Base58 + { + return Err(Error::invalid_params("base58 encoding not supported")); + } - fn send_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result { - debug!("send_transaction rpc request received"); - let config = config.unwrap_or_default(); - let encoding = 
config.encoding.unwrap_or(UiTransactionEncoding::Base58); - let (wire_transaction, transaction) = deserialize_transaction(data, encoding)?; + if config_accounts.addresses.len() > post_simulation_accounts.len() { + return Err(Error::invalid_params(format!( + "Too many accounts provided; max {}", + post_simulation_accounts.len() + ))); + } - let preflight_commitment = config - .preflight_commitment - .map(|commitment| CommitmentConfig { commitment }); - let preflight_bank = &*meta.bank(preflight_commitment); + let mut accounts = vec![]; + for address_str in config_accounts.addresses { + let address = verify_pubkey(&address_str)?; + accounts.push(if result.is_err() { + None + } else { + transaction + .message + .account_keys + .iter() + .position(|pubkey| *pubkey == address) + .map(|i| post_simulation_accounts.get(i)) + .flatten() + .map(|account| { + UiAccount::encode(&address, account, accounts_encoding, None, None) + }) + }); + } + Some(accounts) + } else { + None + }; - let mut last_valid_slot = preflight_bank - .get_blockhash_last_valid_slot(&transaction.message.recent_blockhash) - .unwrap_or(0); + Ok(new_response( + &bank, + RpcSimulateTransactionResult { + err: result.err(), + logs: Some(logs), + accounts, + }, + )) + } - let durable_nonce_info = solana_sdk::transaction::uses_durable_nonce(&transaction) - .and_then(|nonce_ix| { - solana_sdk::transaction::get_nonce_pubkey_from_instruction(&nonce_ix, &transaction) - }) - .map(|&pubkey| (pubkey, transaction.message.recent_blockhash)); - if durable_nonce_info.is_some() { - // While it uses a defined constant, this last_valid_slot value is chosen arbitrarily. - // It provides a fallback timeout for durable-nonce transaction retries in case of - // malicious packing of the retry queue. Durable-nonce transactions are otherwise - // retried until the nonce is advanced. 
- last_valid_slot = preflight_bank.slot() + MAX_RECENT_BLOCKHASHES as u64; + fn get_slot_leader( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + debug!("get_slot_leader rpc request received"); + Ok(meta.get_slot_leader(commitment)) } - if !config.skip_preflight { - if let Err(e) = verify_transaction(&transaction) { - return Err(e); - } + fn get_slot_leaders( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: u64, + ) -> Result> { + debug!( + "get_slot_leaders rpc request received (start: {} limit: {})", + start_slot, limit + ); - match meta.health.check() { - RpcHealthStatus::Ok => (), - RpcHealthStatus::Behind { num_slots } => { - return Err(RpcCustomError::NodeUnhealthy { - num_slots_behind: Some(num_slots), - } - .into()); - } + let limit = limit as usize; + if limit > MAX_GET_SLOT_LEADERS { + return Err(Error::invalid_params(format!( + "Invalid limit; max {}", + MAX_GET_SLOT_LEADERS + ))); } - if let (Err(err), logs) = preflight_bank.simulate_transaction(transaction.clone()) { - return Err(RpcCustomError::SendTransactionPreflightFailure { - message: format!("Transaction simulation failed: {}", err), - result: RpcSimulateTransactionResult { - err: Some(err), - logs: Some(logs), - }, - } - .into()); - } + Ok(meta + .get_slot_leaders(None, start_slot, limit)? 
+ .into_iter() + .map(|identity| identity.to_string()) + .collect()) } - _send_transaction( - meta, - transaction, - wire_transaction, - last_valid_slot, - durable_nonce_info, - ) - } - - fn simulate_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result> { - debug!("simulate_transaction rpc request received"); - let config = config.unwrap_or_default(); - let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); - let (_, transaction) = deserialize_transaction(data, encoding)?; - - if config.sig_verify { - if let Err(e) = verify_transaction(&transaction) { - return Err(e); - } + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result { + debug!("minimum_ledger_slot rpc request received"); + meta.minimum_ledger_slot() } - let bank = &*meta.bank(config.commitment); - let (result, logs) = bank.simulate_transaction(transaction); - - Ok(new_response( - &bank, - RpcSimulateTransactionResult { - err: result.err(), - logs: Some(logs), - }, - )) - } + fn get_confirmed_block( + &self, + meta: Self::Metadata, + slot: Slot, + config: Option>, + ) -> Result> { + debug!("get_confirmed_block rpc request received: {:?}", slot); + meta.get_confirmed_block(slot, config) + } - fn get_slot_leader( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_slot_leader rpc request received"); - Ok(meta.get_slot_leader(commitment)) - } + fn get_confirmed_blocks( + &self, + meta: Self::Metadata, + start_slot: Slot, + config: Option, + commitment: Option, + ) -> Result> { + let (end_slot, maybe_commitment) = + config.map(|config| config.unzip()).unwrap_or_default(); + debug!( + "get_confirmed_blocks rpc request received: {}-{:?}", + start_slot, end_slot + ); + meta.get_confirmed_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) + } - fn get_slot_leaders( - &self, - meta: Self::Metadata, - start_slot: Slot, - limit: u64, - ) -> Result> { - debug!( - "get_slot_leaders rpc request received 
(start: {} limit: {})", - start_slot, limit - ); + fn get_confirmed_blocks_with_limit( + &self, + meta: Self::Metadata, + start_slot: Slot, + limit: usize, + commitment: Option, + ) -> Result> { + debug!( + "get_confirmed_blocks_with_limit rpc request received: {}-{}", + start_slot, limit, + ); + meta.get_confirmed_blocks_with_limit(start_slot, limit, commitment) + } - let limit = limit as usize; - if limit > MAX_GET_SLOT_LEADERS { - return Err(Error::invalid_params(format!( - "Invalid limit; max {}", - MAX_GET_SLOT_LEADERS - ))); + fn get_block_time( + &self, + meta: Self::Metadata, + slot: Slot, + ) -> Result> { + meta.get_block_time(slot) } - let bank = meta.bank(None); - let (mut epoch, mut slot_index) = - bank.epoch_schedule().get_epoch_and_slot_index(start_slot); + fn get_confirmed_transaction( + &self, + meta: Self::Metadata, + signature_str: String, + config: Option>, + ) -> Result> { + debug!( + "get_confirmed_transaction rpc request received: {:?}", + signature_str + ); + let signature = verify_signature(&signature_str)?; + meta.get_confirmed_transaction(signature, config) + } - let mut slot_leaders = Vec::with_capacity(limit); - while slot_leaders.len() < limit { - if let Some(leader_schedule) = - meta.leader_schedule_cache.get_epoch_leader_schedule(epoch) - { - slot_leaders.extend( - leader_schedule - .get_slot_leaders() - .iter() - .skip(slot_index as usize) - .take(limit.saturating_sub(slot_leaders.len())) - .map(|pubkey| pubkey.to_string()), - ); - } else { + fn get_confirmed_signatures_for_address( + &self, + meta: Self::Metadata, + pubkey_str: String, + start_slot: Slot, + end_slot: Slot, + ) -> Result> { + debug!( + "get_confirmed_signatures_for_address rpc request received: {:?} {:?}-{:?}", + pubkey_str, start_slot, end_slot + ); + let pubkey = verify_pubkey(&pubkey_str)?; + if end_slot < start_slot { return Err(Error::invalid_params(format!( - "Invalid slot range: leader schedule for epoch {} is unavailable", - epoch + "start_slot {} must be 
less than or equal to end_slot {}", + start_slot, end_slot ))); } - - epoch += 1; - slot_index = 0; + if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE { + return Err(Error::invalid_params(format!( + "Slot range too large; max {}", + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE + ))); + } + Ok(meta + .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) + .iter() + .map(|signature| signature.to_string()) + .collect()) } - Ok(slot_leaders) - } - - fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result { - debug!("minimum_ledger_slot rpc request received"); - meta.minimum_ledger_slot() - } + fn get_confirmed_signatures_for_address2( + &self, + meta: Self::Metadata, + address: String, + config: Option, + ) -> Result> { + let address = verify_pubkey(&address)?; + + let config = config.unwrap_or_default(); + let before = config + .before + .map(|ref before| verify_signature(before)) + .transpose()?; + let until = config + .until + .map(|ref until| verify_signature(until)) + .transpose()?; + let limit = config + .limit + .unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); + + if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { + return Err(Error::invalid_params(format!( + "Invalid limit; max {}", + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT + ))); + } - fn get_vote_accounts( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - debug!("get_vote_accounts rpc request received"); - meta.get_vote_accounts(commitment) - } + meta.get_confirmed_signatures_for_address2( + address, + before, + until, + limit, + config.commitment, + ) + } - fn validator_exit(&self, meta: Self::Metadata) -> Result { - debug!("validator_exit rpc request received"); - Ok(meta.validator_exit()) - } + fn get_first_available_block(&self, meta: Self::Metadata) -> Result { + debug!("get_first_available_block rpc request received"); + Ok(meta.get_first_available_block()) + } - fn 
get_identity(&self, meta: Self::Metadata) -> Result { - debug!("get_identity rpc request received"); - Ok(RpcIdentity { - identity: meta.config.identity_pubkey.to_string(), - }) - } + fn get_block_production( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + debug!("get_block_production rpc request received"); - fn get_version(&self, _: Self::Metadata) -> Result { - debug!("get_version rpc request received"); - let version = solana_version::Version::default(); - Ok(RpcVersionInfo { - solana_core: version.to_string(), - feature_set: Some(version.feature_set), - }) - } + let config = config.unwrap_or_default(); + let filter_by_identity = if let Some(ref identity) = config.identity { + Some(verify_pubkey(identity)?) + } else { + None + }; - fn set_log_filter(&self, meta: Self::Metadata, filter: String) -> Result<()> { - debug!("set_log_filter rpc request received"); - meta.set_log_filter(filter); - Ok(()) - } + let bank = meta.bank(config.commitment); + let (first_slot, last_slot) = match config.range { + None => ( + bank.epoch_schedule().get_first_slot_in_epoch(bank.epoch()), + bank.slot(), + ), + Some(range) => { + let first_slot = range.first_slot; + let last_slot = range.last_slot.unwrap_or_else(|| bank.slot()); + if last_slot < first_slot { + return Err(Error::invalid_params(format!( + "lastSlot, {}, cannot be less than firstSlot, {}", + last_slot, first_slot + ))); + } + (first_slot, last_slot) + } + }; - fn get_confirmed_block( - &self, - meta: Self::Metadata, - slot: Slot, - config: Option>, - ) -> Result> { - debug!("get_confirmed_block rpc request received: {:?}", slot); - meta.get_confirmed_block(slot, config) - } + let slot_history = bank.get_slot_history(); + if first_slot < slot_history.oldest() { + return Err(Error::invalid_params(format!( + "firstSlot, {}, is too small; min {}", + first_slot, + slot_history.oldest() + ))); + } + if last_slot > slot_history.newest() { + return Err(Error::invalid_params(format!( + "lastSlot, {}, is 
too large; max {}", + last_slot, + slot_history.newest() + ))); + } - fn get_confirmed_blocks( - &self, - meta: Self::Metadata, - start_slot: Slot, - config: Option, - commitment: Option, - ) -> Result> { - let (end_slot, maybe_commitment) = config.map(|config| config.unzip()).unwrap_or_default(); - debug!( - "get_confirmed_blocks rpc request received: {}-{:?}", - start_slot, end_slot - ); - meta.get_confirmed_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) - } + let slot_leaders = meta.get_slot_leaders( + config.commitment, + first_slot, + last_slot.saturating_sub(first_slot) as usize + 1, // +1 because last_slot is inclusive + )?; - fn get_confirmed_blocks_with_limit( - &self, - meta: Self::Metadata, - start_slot: Slot, - limit: usize, - commitment: Option, - ) -> Result> { - debug!( - "get_confirmed_blocks_with_limit rpc request received: {}-{}", - start_slot, limit, - ); - meta.get_confirmed_blocks_with_limit(start_slot, limit, commitment) - } + let mut block_production: HashMap<_, (usize, usize)> = HashMap::new(); - fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result> { - meta.get_block_time(slot) - } + let mut slot = first_slot; + for identity in slot_leaders { + if let Some(ref filter_by_identity) = filter_by_identity { + if identity != *filter_by_identity { + slot += 1; + continue; + } + } - fn get_confirmed_transaction( - &self, - meta: Self::Metadata, - signature_str: String, - config: Option>, - ) -> Result> { - debug!( - "get_confirmed_transaction rpc request received: {:?}", - signature_str - ); - let signature = verify_signature(&signature_str)?; - meta.get_confirmed_transaction(signature, config) - } + let mut entry = block_production.entry(identity).or_default(); + if slot_history.check(slot) == solana_sdk::slot_history::Check::Found { + entry.1 += 1; // Increment blocks_produced + } + entry.0 += 1; // Increment leader_slots + slot += 1; + } - fn get_confirmed_signatures_for_address( - &self, - meta: Self::Metadata, - 
pubkey_str: String, - start_slot: Slot, - end_slot: Slot, - ) -> Result> { - debug!( - "get_confirmed_signatures_for_address rpc request received: {:?} {:?}-{:?}", - pubkey_str, start_slot, end_slot - ); - let pubkey = verify_pubkey(pubkey_str)?; - if end_slot < start_slot { - return Err(Error::invalid_params(format!( - "start_slot {} must be less than or equal to end_slot {}", - start_slot, end_slot - ))); - } - if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE { - return Err(Error::invalid_params(format!( - "Slot range too large; max {}", - MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE - ))); + Ok(new_response( + &bank, + RpcBlockProduction { + by_identity: block_production + .into_iter() + .map(|(k, v)| (k.to_string(), v)) + .collect(), + range: RpcBlockProductionRange { + first_slot, + last_slot, + }, + }, + )) } - Ok(meta - .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) - .iter() - .map(|signature| signature.to_string()) - .collect()) - } - fn get_confirmed_signatures_for_address2( - &self, - meta: Self::Metadata, - address: String, - config: Option, - ) -> Result> { - let address = verify_pubkey(address)?; - - let config = config.unwrap_or_default(); - let before = config - .before - .map(|ref before| verify_signature(before)) - .transpose()?; - let until = config - .until - .map(|ref until| verify_signature(until)) - .transpose()?; - let limit = config - .limit - .unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); - - if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { - return Err(Error::invalid_params(format!( - "Invalid limit; max {}", - MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT - ))); + fn get_stake_activation( + &self, + meta: Self::Metadata, + pubkey_str: String, + config: Option, + ) -> Result { + debug!( + "get_stake_activation rpc request received: {:?}", + pubkey_str + ); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_stake_activation(&pubkey, 
config) } - meta.get_confirmed_signatures_for_address2(address, before, until, limit, config.commitment) - } - - fn get_first_available_block(&self, meta: Self::Metadata) -> Result { - debug!("get_first_available_block rpc request received"); - Ok(meta.get_first_available_block()) - } - - fn get_stake_activation( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result { - debug!( - "get_stake_activation rpc request received: {:?}", - pubkey_str - ); - let pubkey = verify_pubkey(pubkey_str)?; - meta.get_stake_activation(&pubkey, config) - } + fn get_inflation_reward( + &self, + meta: Self::Metadata, + address_strs: Vec, + config: Option, + ) -> Result>> { + debug!( + "get_inflation_reward rpc request received: {:?}", + address_strs.len() + ); - fn get_inflation_reward( - &self, - meta: Self::Metadata, - address_strs: Vec, - config: Option, - ) -> Result>> { - debug!( - "get_inflation_reward rpc request received: {:?}", - address_strs.len() - ); + let mut addresses: Vec = vec![]; + for address_str in address_strs { + addresses.push(verify_pubkey(&address_str)?); + } - let mut addresses: Vec = vec![]; - for address_str in address_strs { - addresses.push(verify_pubkey(address_str)?); + meta.get_inflation_reward(addresses, config) } - meta.get_inflation_reward(addresses, config) - } - - fn get_token_account_balance( - &self, - meta: Self::Metadata, - pubkey_str: String, - commitment: Option, - ) -> Result> { - debug!( - "get_token_account_balance rpc request received: {:?}", - pubkey_str - ); - let pubkey = verify_pubkey(pubkey_str)?; - meta.get_token_account_balance(&pubkey, commitment) - } + fn get_token_account_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result> { + debug!( + "get_token_account_balance rpc request received: {:?}", + pubkey_str + ); + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_token_account_balance(&pubkey, commitment) + } - fn get_token_supply( - &self, - meta: 
Self::Metadata, - mint_str: String, - commitment: Option, - ) -> Result> { - debug!("get_token_supply rpc request received: {:?}", mint_str); - let mint = verify_pubkey(mint_str)?; - meta.get_token_supply(&mint, commitment) - } + fn get_token_supply( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result> { + debug!("get_token_supply rpc request received: {:?}", mint_str); + let mint = verify_pubkey(&mint_str)?; + meta.get_token_supply(&mint, commitment) + } - fn get_token_largest_accounts( - &self, - meta: Self::Metadata, - mint_str: String, - commitment: Option, - ) -> Result>> { - debug!( - "get_token_largest_accounts rpc request received: {:?}", - mint_str - ); - let mint = verify_pubkey(mint_str)?; - meta.get_token_largest_accounts(&mint, commitment) - } + fn get_token_largest_accounts( + &self, + meta: Self::Metadata, + mint_str: String, + commitment: Option, + ) -> Result>> { + debug!( + "get_token_largest_accounts rpc request received: {:?}", + mint_str + ); + let mint = verify_pubkey(&mint_str)?; + meta.get_token_largest_accounts(&mint, commitment) + } - fn get_token_accounts_by_owner( - &self, - meta: Self::Metadata, - owner_str: String, - token_account_filter: RpcTokenAccountsFilter, - config: Option, - ) -> Result>> { - debug!( - "get_token_accounts_by_owner rpc request received: {:?}", - owner_str - ); - let owner = verify_pubkey(owner_str)?; - let token_account_filter = verify_token_account_filter(token_account_filter)?; - meta.get_token_accounts_by_owner(&owner, token_account_filter, config) - } + fn get_token_accounts_by_owner( + &self, + meta: Self::Metadata, + owner_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>> { + debug!( + "get_token_accounts_by_owner rpc request received: {:?}", + owner_str + ); + let owner = verify_pubkey(&owner_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_owner(&owner, 
token_account_filter, config) + } - fn get_token_accounts_by_delegate( - &self, - meta: Self::Metadata, - delegate_str: String, - token_account_filter: RpcTokenAccountsFilter, - config: Option, - ) -> Result>> { - debug!( - "get_token_accounts_by_delegate rpc request received: {:?}", - delegate_str - ); - let delegate = verify_pubkey(delegate_str)?; - let token_account_filter = verify_token_account_filter(token_account_filter)?; - meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config) - } + fn get_token_accounts_by_delegate( + &self, + meta: Self::Metadata, + delegate_str: String, + token_account_filter: RpcTokenAccountsFilter, + config: Option, + ) -> Result>> { + debug!( + "get_token_accounts_by_delegate rpc request received: {:?}", + delegate_str + ); + let delegate = verify_pubkey(&delegate_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config) + } - fn get_velas_accounts_by_owner_key( - &self, - meta: Self::Metadata, - pubkey_str: String, - ) -> Result>> { - debug!( - "get_velas_accounts_by_owner_key rpc request received: {:?}", - pubkey_str - ); + // Velas scope - let owner_key = verify_pubkey(pubkey_str)?; - let bank = meta.bank(None); + fn get_velas_accounts_by_owner_key( + &self, + meta: Self::Metadata, + pubkey_str: String, + ) -> Result>> { + debug!( + "get_velas_accounts_by_owner_key rpc request received: {:?}", + pubkey_str + ); - let storages = meta.get_velas_accounts_storages_by_owner_key(&bank, owner_key); - debug!( - "get_velas_accounts_by_owner_key velas accounts storages {:?}", - storages - ); + let owner_key = verify_pubkey(&pubkey_str)?; + let bank = meta.bank(None); - let accounts = storages - .into_iter() - .flat_map(|(storage, _)| meta.get_velas_accounts_by_storage_key(&bank, storage)); - debug!( - "get_velas_accounts_by_owner_key velas accounts {:?}", - accounts - ); + let storages = 
meta.get_velas_accounts_storages_by_owner_key(&bank, owner_key); + debug!( + "get_velas_accounts_by_owner_key velas accounts storages {:?}", + storages + ); - Ok(new_response( - &bank, - accounts + let accounts = storages .into_iter() - .map(|(pubkey, _account)| pubkey.to_string()) - .collect(), - )) - } + .flat_map(|(storage, _)| meta.get_velas_accounts_by_storage_key(&bank, storage)); + debug!( + "get_velas_accounts_by_owner_key velas accounts {:?}", + accounts + ); - fn get_velas_accounts_by_operational_key( - &self, - meta: Self::Metadata, - pubkey_str: String, - ) -> Result>> { - debug!( - "get_velas_accounts_by_operational_key rpc request received: {:?}", - pubkey_str - ); + Ok(new_response( + &bank, + accounts + .into_iter() + .map(|(pubkey, _account)| pubkey.to_string()) + .collect(), + )) + } - let operational_key = verify_pubkey(pubkey_str)?; - let bank = meta.bank(None); + fn get_velas_accounts_by_operational_key( + &self, + meta: Self::Metadata, + pubkey_str: String, + ) -> Result>> { + debug!( + "get_velas_accounts_by_operational_key rpc request received: {:?}", + pubkey_str + ); - let storages = meta.get_velas_accounts_storages_by_operational_key(&bank, operational_key); - debug!( - "get_velas_accounts_by_operational_key velas accounts storages {:?}", - storages - ); + let operational_key = verify_pubkey(&pubkey_str)?; + let bank = meta.bank(None); - let accounts = storages - .into_iter() - .flat_map(|(storage, _)| meta.get_velas_accounts_by_storage_key(&bank, storage)); - debug!( - "get_velas_accounts_by_operational_key velas accounts {:?}", - accounts - ); + let storages = + meta.get_velas_accounts_storages_by_operational_key(&bank, operational_key); + debug!( + "get_velas_accounts_by_operational_key velas accounts storages {:?}", + storages + ); - Ok(new_response( - &bank, - accounts + let accounts = storages .into_iter() - .map(|(pubkey, _account)| pubkey.to_string()) - .collect(), - )) + .flat_map(|(storage, _)| 
meta.get_velas_accounts_by_storage_key(&bank, storage)); + debug!( + "get_velas_accounts_by_operational_key velas accounts {:?}", + accounts + ); + + Ok(new_response( + &bank, + accounts + .into_iter() + .map(|(pubkey, _account)| pubkey.to_string()) + .collect(), + )) + } } } @@ -3704,16 +3989,16 @@ fn deserialize_transaction( .map(|transaction| (wire_transaction, transaction)) } -pub(crate) fn create_validator_exit(exit: &Arc) -> Arc>> { +pub(crate) fn create_validator_exit(exit: &Arc) -> Arc> { let mut validator_exit = ValidatorExit::default(); let exit_ = exit.clone(); validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed))); - Arc::new(RwLock::new(Some(validator_exit))) + Arc::new(RwLock::new(validator_exit)) } #[cfg(test)] pub mod tests { - use super::*; + use super::{rpc_full::*, rpc_minimal::*, *}; use crate::{ contact_info::ContactInfo, non_circulating_supply::non_circulating_accounts, @@ -3724,9 +4009,7 @@ pub mod tests { rpc_subscriptions::RpcSubscriptions, }; use bincode::deserialize; - use jsonrpc_core::{ - futures::future::Future, ErrorCode, MetaIoHandler, Output, Response, Value, - }; + use jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}; use jsonrpc_core_client::transports::local; use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes}; use solana_ledger::{ @@ -3738,6 +4021,7 @@ pub mod tests { accounts_background_service::AbsRequestSender, commitment::BlockCommitment, }; use solana_sdk::{ + account::Account, clock::MAX_RECENT_BLOCKHASHES, fee_calculator::DEFAULT_BURN_PERCENT, hash::{hash, Hash}, @@ -3857,7 +4141,10 @@ pub mod tests { .unwrap() .set_root(*root, &AbsRequestSender::default(), Some(0)); let mut stakes = HashMap::new(); - stakes.insert(leader_vote_keypair.pubkey(), (1, Account::default())); + stakes.insert( + leader_vote_keypair.pubkey(), + (1, AccountSharedData::default()), + ); let block_time = bank_forks .read() .unwrap() @@ -3921,7 +4208,7 @@ pub mod tests { 
RpcHealth::stub(), cluster_info.clone(), Hash::default(), - &runtime::Runtime::new().unwrap(), + Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), @@ -3937,8 +4224,8 @@ pub mod tests { )); let mut io = MetaIoHandler::default(); - let rpc = RpcSolImpl; - io.extend_with(rpc.to_delegate()); + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); + io.extend_with(rpc_full::FullImpl.to_delegate()); RpcHandler { io, meta, @@ -3972,7 +4259,7 @@ pub mod tests { let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); - io.extend_with(RpcSolImpl.to_delegate()); + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, @@ -4000,17 +4287,25 @@ pub mod tests { let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); - io.extend_with(RpcSolImpl.to_delegate()); + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); - let fut = { - let (client, server) = - local::connect_with_metadata::(&io, meta); + async fn use_client(client: rpc_minimal::gen_client::Client, mint_pubkey: Pubkey) -> u64 { client .get_balance(mint_pubkey.to_string(), None) - .join(server) + .await + .unwrap() + .value + } + + let fut = async { + let (client, server) = + local::connect_with_metadata::(&io, meta); + let client = use_client(client, mint_pubkey); + + futures::join!(client, server) }; - let (response, _) = fut.wait().unwrap(); - assert_eq!(response.value, 20); + let (response, _) = futures::executor::block_on(fut); + assert_eq!(response, 20); } #[test] @@ -4030,7 +4325,7 @@ pub mod tests { .expect("actual response deserialization"); let expected = format!( - r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null, 
"featureSet": null}}],"id":1}}"#, + r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "shredVersion": 0, "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null, "featureSet": null}}],"id":1}}"#, leader_pubkey, rpc_port::DEFAULT_RPC_PORT ); @@ -4133,7 +4428,7 @@ pub mod tests { let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); - io.extend_with(RpcSolImpl.to_delegate()); + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; let res = io.handle_request_sync(&req, meta); @@ -4377,6 +4672,14 @@ pub mod tests { for req in [ r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [0]}"#, r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule"}"#, + &format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [null, {{ "identity": "{}" }}]}}"#, + bank.collector_id().to_string() + ), + &format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}" }}]}}"#, + bank.collector_id().to_string() + ), ] .iter() { @@ -4409,7 +4712,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#; - let rep = io.handle_request_sync(&req, meta); + let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -4423,6 +4726,27 @@ pub mod tests { panic!("Expected single response"); }; assert_eq!(schedule, None); + + // `bob` is not in the leader schedule, look for an empty response + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}"}}]}}"#, + bob_pubkey + ); + + let rep = io.handle_request_sync(&req, meta); + let res: Response = serde_json::from_str(&rep.expect("actual response")) + .expect("actual response deserialization"); + + let schedule: 
Option = if let Response::Single(res) = res { + if let Output::Success(res) = res { + serde_json::from_value(res.result).unwrap() + } else { + panic!("Expected success"); + } + } else { + panic!("Expected single response"); + }; + assert_eq!(schedule, Some(HashMap::default())); } #[test] @@ -4513,7 +4837,7 @@ pub mod tests { let address = solana_sdk::pubkey::new_rand(); let data = vec![1, 2, 3, 4, 5]; - let mut account = Account::new(42, 5, &Pubkey::default()); + let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.data = data.clone(); bank.store_account(&address, &account); @@ -4570,7 +4894,7 @@ pub mod tests { let address = Pubkey::new(&[9; 32]); let data = vec![1, 2, 3, 4, 5]; - let mut account = Account::new(42, 5, &Pubkey::default()); + let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.data = data.clone(); bank.store_account(&address, &account); @@ -4726,6 +5050,26 @@ pub mod tests { .expect("actual response deserialization"); assert_eq!(expected, result); + // Test returns context + let req = format!( + r#"{{ + "jsonrpc":"2.0", + "id":1, + "method":"getProgramAccounts", + "params":["{}",{{ + "withContext": true + }}] + }}"#, + system_program::id(), + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")).unwrap(); + let contains_slot = result["result"]["context"] + .as_object() + .expect("must contain context") + .contains_key("slot"); + assert!(contains_slot); + // Set up nonce accounts to test filters let nonce_keypair0 = Keypair::new(); let instruction = system_instruction::create_nonce_account( @@ -4882,7 +5226,6 @@ pub mod tests { #[test] fn test_rpc_simulate_transaction() { - let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, @@ -4890,31 +5233,99 @@ pub mod tests { alice, bank, .. 
- } = start_rpc_handler_with_tx(&bob_pubkey); + } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); + let bob_pubkey = solana_sdk::pubkey::new_rand(); let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash); let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); tx.signatures[0] = Signature::default(); let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); + tx.message.recent_blockhash = Hash::default(); + let tx_invalid_recent_blockhash = bs58::encode(serialize(&tx).unwrap()).into_string(); bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this // Good signature with sigVerify=true let req = format!( - r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ + "sigVerify": true, + "accounts": {{ + "encoding": "jsonParsed", + "addresses": ["{}", "{}"] + }} + }} + ] + }}"#, tx_serialized_encoded, + solana_sdk::pubkey::new_rand(), + bob_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, - "value":{"err":null, "logs":[ - "Program 11111111111111111111111111111111 invoke [1]", - "Program 11111111111111111111111111111111 success" - ]} + "value":{ + "accounts": [ + null, + { + "data": ["", "base64"], + "executable": false, + "owner": "11111111111111111111111111111111", + "lamports": 1234, + "rentEpoch": 0 + } + ], + "err":null, + "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ] + } + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + 
assert_eq!(expected, result); + + // Too many input accounts... + let req = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateTransaction", + "params":[ + "{}", + {{ + "sigVerify": true, + "accounts": {{ + "addresses": [ + "11111111111111111111111111111111", + "11111111111111111111111111111111", + "11111111111111111111111111111111", + "11111111111111111111111111111111" + ] + }} + }} + ] + }}"#, + tx_serialized_encoded, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "error": { + "code": error::ErrorCode::InvalidParams.code(), + "message": "Too many accounts provided; max 3" }, - "id": 1, + "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); @@ -4953,7 +5364,7 @@ pub mod tests { "jsonrpc": "2.0", "result": { "context":{"slot":0}, - "value":{"err":null, "logs":[ + "value":{"accounts": null, "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ]} @@ -4971,18 +5382,88 @@ pub mod tests { r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#, tx_serialized_encoded, ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc": "2.0", + "result": { + "context":{"slot":0}, + "value":{"accounts": null, "err":null, "logs":[ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ]} + }, + "id": 1, + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(expected, result); + + // Enabled both sigVerify=true and replaceRecentBlockhash=true + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {}]}}"#, + tx_serialized_encoded, + 
json!({ + "sigVerify": true, + "replaceRecentBlockhash": true, + }) + .to_string() + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "error": { + "code": ErrorCode::InvalidParams, + "message": "sigVerify may not be used with replaceRecentBlockhash" + }, + "id":1 + }); + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(expected, result); + + // Bad recent blockhash with replaceRecentBlockhash=false + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"replaceRecentBlockhash": false}}]}}"#, + tx_invalid_recent_blockhash, + ); + let res = io.handle_request_sync(&req, meta.clone()); + let expected = json!({ + "jsonrpc":"2.0", + "result": { + "context":{"slot":0}, + "value":{"err": "BlockhashNotFound", "accounts": null, "logs":[]} + }, + "id":1 + }); + + let expected: Response = + serde_json::from_value(expected).expect("expected response deserialization"); + let result: Response = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + assert_eq!(expected, result); + + // Bad recent blockhash with replaceRecentBlockhash=true + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"replaceRecentBlockhash": true}}]}}"#, + tx_invalid_recent_blockhash, + ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, - "value":{"err":null, "logs":[ + "value":{"accounts": null, "err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ]} }, "id": 1, }); + let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: 
Response = serde_json::from_str(&res.expect("actual response")) @@ -5124,7 +5605,7 @@ pub mod tests { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, - meta, + mut meta, blockhash, alice, confirmed_block_signatures, @@ -5163,7 +5644,7 @@ pub mod tests { r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, confirmed_block_signatures[1] ); - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(&req, meta.clone()); let expected_res: transaction::Result<()> = Err(TransactionError::InstructionError( 0, InstructionError::Custom(1), @@ -5173,6 +5654,20 @@ pub mod tests { serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); assert_eq!(expected_res, result.as_ref().unwrap().status); + + // disable rpc-tx-history, but attempt historical query + meta.config.enable_rpc_transaction_history = false; + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"], {{"searchTransactionHistory": true}}]}}"#, + confirmed_block_signatures[1] + ); + let res = io.handle_request_sync(&req, meta); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32011,"message":"Transaction history is not available from this node"},"id":1}"#.to_string(), + ) + ); } #[test] @@ -5228,6 +5723,7 @@ pub mod tests { "lamportsPerSignature": 0, }, "lastValidSlot": MAX_RECENT_BLOCKHASHES, + "lastValidBlockHeight": MAX_RECENT_BLOCKHASHES, }}, "id": 1 }); @@ -5342,8 +5838,7 @@ pub mod tests { let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); - let rpc = RpcSolImpl; - io.extend_with(rpc.to_delegate()); + io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#; let res = io.handle_request_sync(req, meta); @@ -5366,8 +5861,7 @@ pub mod tests { 
bank_forks.write().unwrap().get(0).unwrap().freeze(); let mut io = MetaIoHandler::default(); - let rpc = RpcSolImpl; - io.extend_with(rpc.to_delegate()); + io.extend_with(rpc_full::FullImpl.to_delegate()); let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair( ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")), )); @@ -5382,7 +5876,7 @@ pub mod tests { health.clone(), cluster_info, Hash::default(), - &runtime::Runtime::new().unwrap(), + Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), @@ -5408,7 +5902,7 @@ pub mod tests { assert_eq!( res, Some( - r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"err":"BlockhashNotFound","logs":[]}},"id":1}"#.to_string(), + r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","logs":[]}},"id":1}"#.to_string(), ) ); @@ -5515,10 +6009,10 @@ pub mod tests { #[test] fn test_rpc_verify_pubkey() { let pubkey = solana_sdk::pubkey::new_rand(); - assert_eq!(verify_pubkey(pubkey.to_string()).unwrap(), pubkey); + assert_eq!(verify_pubkey(&pubkey.to_string()).unwrap(), pubkey); let bad_pubkey = "a1b2c3d4"; assert_eq!( - verify_pubkey(bad_pubkey.to_string()), + verify_pubkey(&bad_pubkey.to_string()), Err(Error::invalid_params("Invalid param: WrongSize")) ); } @@ -5562,76 +6056,6 @@ pub mod tests { ) } - #[test] - fn test_rpc_request_processor_config_default_trait_validator_exit_fails() { - let exit = Arc::new(AtomicBool::new(false)); - let validator_exit = create_validator_exit(&exit); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); - let cluster_info = 
Arc::new(ClusterInfo::default()); - let tpu_address = cluster_info.my_contact_info().tpu; - let bank_forks = new_bank_forks().0; - let (request_processor, receiver) = JsonRpcRequestProcessor::new( - JsonRpcConfig::default(), - None, - bank_forks.clone(), - block_commitment_cache, - blockstore, - validator_exit, - RpcHealth::stub(), - cluster_info, - Hash::default(), - &runtime::Runtime::new().unwrap(), - None, - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), - Arc::new(RwLock::new(LargestAccountsCache::new(30))), - Arc::new(MaxSlots::default()), - Arc::new(LeaderScheduleCache::default()), - Arc::new(AtomicU64::default()), - ); - SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); - assert_eq!(request_processor.validator_exit(), false); - assert_eq!(exit.load(Ordering::Relaxed), false); - } - - #[test] - fn test_rpc_request_processor_allow_validator_exit_config() { - let exit = Arc::new(AtomicBool::new(false)); - let validator_exit = create_validator_exit(&exit); - let ledger_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); - let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); - let config = JsonRpcConfig { - enable_validator_exit: true, - ..JsonRpcConfig::default() - }; - let bank_forks = new_bank_forks().0; - let cluster_info = Arc::new(ClusterInfo::default()); - let tpu_address = cluster_info.my_contact_info().tpu; - let (request_processor, receiver) = JsonRpcRequestProcessor::new( - config, - None, - bank_forks.clone(), - block_commitment_cache, - blockstore, - validator_exit, - RpcHealth::stub(), - cluster_info, - Hash::default(), - &runtime::Runtime::new().unwrap(), - None, - OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), - Arc::new(RwLock::new(LargestAccountsCache::new(30))), - Arc::new(MaxSlots::default()), - Arc::new(LeaderScheduleCache::default()), - Arc::new(AtomicU64::default()), - ); - 
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); - assert_eq!(request_processor.validator_exit(), true); - assert_eq!(exit.load(Ordering::Relaxed), true); - } - #[test] fn test_rpc_get_identity() { let bob_pubkey = solana_sdk::pubkey::new_rand(); @@ -5717,14 +6141,10 @@ pub mod tests { CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()), ))); - let config = JsonRpcConfig { - enable_validator_exit: true, - ..JsonRpcConfig::default() - }; let cluster_info = Arc::new(ClusterInfo::default()); let tpu_address = cluster_info.my_contact_info().tpu; let (request_processor, receiver) = JsonRpcRequestProcessor::new( - config, + JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, @@ -5733,7 +6153,7 @@ pub mod tests { RpcHealth::stub(), cluster_info, Hash::default(), - &runtime::Runtime::new().unwrap(), + Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), @@ -5824,7 +6244,7 @@ pub mod tests { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, - meta, + mut meta, confirmed_block_signatures, blockhash, .. 
@@ -5876,7 +6296,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0,"binary"]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option = @@ -5917,6 +6337,17 @@ pub mod tests { } } } + + // disable rpc-tx-history + meta.config.enable_rpc_transaction_history = false; + let req = r#"{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlock","params":[0]}"#; + let res = io.handle_request_sync(&req, meta); + assert_eq!( + res, + Some( + r#"{"jsonrpc":"2.0","error":{"code":-32011,"message":"Transaction history is not available from this node"},"id":1}"#.to_string(), + ) + ); } #[test] @@ -5970,6 +6401,83 @@ pub mod tests { assert_eq!(confirmed_block.rewards.unwrap(), vec![]); } + #[test] + fn test_get_block_production() { + let bob_pubkey = solana_sdk::pubkey::new_rand(); + let roots = vec![0, 1, 3, 4, 8]; + let RpcHandler { + io, + meta, + block_commitment_cache, + leader_pubkey, + .. 
+ } = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots); + block_commitment_cache + .write() + .unwrap() + .set_highest_confirmed_root(8); + + let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[]}"#; + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let block_production: RpcBlockProduction = + serde_json::from_value(result["result"]["value"].clone()).unwrap(); + assert_eq!( + block_production.by_identity.get(&leader_pubkey.to_string()), + Some(&(9, 5)) + ); + assert_eq!( + block_production.range, + RpcBlockProductionRange { + first_slot: 0, + last_slot: 8 + } + ); + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[{{"identity": "{}"}}]}}"#, + leader_pubkey + ); + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let block_production: RpcBlockProduction = + serde_json::from_value(result["result"]["value"].clone()).unwrap(); + assert_eq!( + block_production.by_identity.get(&leader_pubkey.to_string()), + Some(&(9, 5)) + ); + assert_eq!( + block_production.range, + RpcBlockProductionRange { + first_slot: 0, + last_slot: 8 + } + ); + + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[{{"range": {{"firstSlot": 0, "lastSlot": 4}}, "identity": "{}"}}]}}"#, + bob_pubkey + ); + let res = io.handle_request_sync(&req, meta); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + let block_production: RpcBlockProduction = + serde_json::from_value(result["result"]["value"].clone()).unwrap(); + assert_eq!( + block_production.by_identity.get(&leader_pubkey.to_string()), + None + ); + assert_eq!( + block_production.range, + RpcBlockProductionRange 
{ + first_slot: 0, + last_slot: 4 + } + ); + } + #[test] fn test_get_confirmed_blocks() { let bob_pubkey = solana_sdk::pubkey::new_rand(); @@ -6247,8 +6755,7 @@ pub mod tests { } } - // Advance bank to the next epoch - for _ in 0..TEST_SLOTS_PER_EPOCH { + let mut advance_bank = || { bank.freeze(); // Votes @@ -6289,6 +6796,11 @@ pub mod tests { bank.process_transaction(&transaction) .expect("process transaction"); + }; + + // Advance bank to the next epoch + for _ in 0..TEST_SLOTS_PER_EPOCH { + advance_bank(); } let req = format!( @@ -6308,7 +6820,6 @@ pub mod tests { // Both accounts should be active and have voting history. assert_eq!(vote_account_status.current.len(), 2); - //let leader_info = &vote_account_status.current[0]; let leader_info = vote_account_status .current .iter() @@ -6325,6 +6836,57 @@ pub mod tests { ] ); + // Filter request based on the leader: + { + let req = format!( + r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, + json!([RpcGetVoteAccountsConfig { + vote_pubkey: Some(leader_vote_keypair.pubkey().to_string()), + commitment: Some(CommitmentConfig::processed()) + }]) + ); + + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + + let vote_account_status: RpcVoteAccountStatus = + serde_json::from_value(result["result"].clone()).unwrap(); + + assert_eq!(vote_account_status.current.len(), 1); + assert_eq!(vote_account_status.delinquent.len(), 0); + for vote_account_info in vote_account_status.current { + assert_eq!( + vote_account_info.vote_pubkey, + leader_vote_keypair.pubkey().to_string() + ); + } + } + + // Overflow the epoch credits history and ensure only `MAX_RPC_EPOCH_CREDITS_HISTORY` + // results are returned + for _ in 0..(TEST_SLOTS_PER_EPOCH * (MAX_RPC_EPOCH_CREDITS_HISTORY) as u64) { + advance_bank(); + } + + let req = format!( + 
r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, + json!([CommitmentConfig::processed()]) + ); + + let res = io.handle_request_sync(&req, meta.clone()); + let result: Value = serde_json::from_str(&res.expect("actual response")) + .expect("actual response deserialization"); + + let vote_account_status: RpcVoteAccountStatus = + serde_json::from_value(result["result"].clone()).unwrap(); + + assert!(vote_account_status.delinquent.is_empty()); + assert!(!vote_account_status + .current + .iter() + .any(|x| x.epoch_credits.len() != MAX_RPC_EPOCH_CREDITS_HISTORY)); + // Advance bank with no voting bank.freeze(); bank_forks.write().unwrap().insert(Bank::new_from_parent( @@ -6425,12 +6987,12 @@ pub mod tests { close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); - let token_account = Account { + let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); @@ -6444,12 +7006,12 @@ pub mod tests { freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); - let mint_account = Account { + let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( @@ -6521,12 +7083,12 @@ pub mod tests { close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); - let token_account = Account { + let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand(); 
bank.store_account(&token_with_different_mint_pubkey, &token_account); @@ -6740,12 +7302,12 @@ pub mod tests { freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); - let mint_account = Account { + let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); bank.store_account( &Pubkey::from_str(&new_mint.to_string()).unwrap(), &mint_account, @@ -6762,12 +7324,12 @@ pub mod tests { close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); - let token_account = Account { + let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); let token_with_smaller_balance = solana_sdk::pubkey::new_rand(); bank.store_account(&token_with_smaller_balance, &token_account); @@ -6826,12 +7388,12 @@ pub mod tests { close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); - let token_account = Account { + let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); @@ -6845,12 +7407,12 @@ pub mod tests { freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); - let mint_account = Account { + let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() - }; + }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( @@ -7016,7 +7578,7 @@ pub mod tests { RpcHealth::stub(), cluster_info, Hash::default(), - &runtime::Runtime::new().unwrap(), + Arc::new(tokio::runtime::Runtime::new().unwrap()), 
None, optimistically_confirmed_bank.clone(), Arc::new(RwLock::new(LargestAccountsCache::new(30))), @@ -7026,7 +7588,8 @@ pub mod tests { ); let mut io = MetaIoHandler::default(); - io.extend_with(RpcSolImpl.to_delegate()); + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); + io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"confirmed"}]}"#; diff --git a/core/src/rpc_health.rs b/core/src/rpc_health.rs index 51e35fca16..dd196c11c7 100644 --- a/core/src/rpc_health.rs +++ b/core/src/rpc_health.rs @@ -8,10 +8,11 @@ use { }, }; -#[derive(PartialEq, Clone, Copy)] +#[derive(PartialEq, Clone, Copy, Debug)] pub enum RpcHealthStatus { Ok, Behind { num_slots: Slot }, // Validator is behind its trusted validators + Unknown, } pub struct RpcHealth { @@ -51,52 +52,61 @@ impl RpcHealth { if self.override_health_check.load(Ordering::Relaxed) { RpcHealthStatus::Ok } else if let Some(trusted_validators) = &self.trusted_validators { - let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = { + match ( + self.cluster_info + .get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| { + hashes + .iter() + .max_by(|a, b| a.0.cmp(&b.0)) + .map(|slot_hash| slot_hash.0) + }) + .flatten(), + trusted_validators + .iter() + .filter_map(|trusted_validator| { + self.cluster_info + .get_accounts_hash_for_node(&trusted_validator, |hashes| { + hashes + .iter() + .max_by(|a, b| a.0.cmp(&b.0)) + .map(|slot_hash| slot_hash.0) + }) + .flatten() + }) + .max(), + ) { ( - self.cluster_info - .get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| { - hashes - .iter() - .max_by(|a, b| a.0.cmp(&b.0)) - .map(|slot_hash| slot_hash.0) - }) - .flatten() - .unwrap_or(0), - trusted_validators - .iter() - .map(|trusted_validator| { - self.cluster_info - .get_accounts_hash_for_node(&trusted_validator, |hashes| { - hashes - .iter() - .max_by(|a, b| a.0.cmp(&b.0)) - .map(|slot_hash| slot_hash.0) - }) 
- .flatten() - .unwrap_or(0) - }) - .max() - .unwrap_or(0), - ) - }; - - // This validator is considered healthy if its latest account hash slot is within - // `health_check_slot_distance` of the latest trusted validator's account hash slot - if latest_account_hash_slot > 0 - && latest_trusted_validator_account_hash_slot > 0 - && latest_account_hash_slot - > latest_trusted_validator_account_hash_slot - .saturating_sub(self.health_check_slot_distance) - { - RpcHealthStatus::Ok - } else { - let num_slots = latest_trusted_validator_account_hash_slot - .saturating_sub(latest_account_hash_slot); - warn!( - "health check: behind by {} slots: me={}, latest trusted_validator={}", - num_slots, latest_account_hash_slot, latest_trusted_validator_account_hash_slot - ); - RpcHealthStatus::Behind { num_slots } + Some(latest_account_hash_slot), + Some(latest_trusted_validator_account_hash_slot), + ) => { + // The validator is considered healthy if its latest account hash slot is within + // `health_check_slot_distance` of the latest trusted validator's account hash slot + if latest_account_hash_slot + > latest_trusted_validator_account_hash_slot + .saturating_sub(self.health_check_slot_distance) + { + RpcHealthStatus::Ok + } else { + let num_slots = latest_trusted_validator_account_hash_slot + .saturating_sub(latest_account_hash_slot); + warn!( + "health check: behind by {} slots: me={}, latest trusted_validator={}", + num_slots, + latest_account_hash_slot, + latest_trusted_validator_account_hash_slot + ); + RpcHealthStatus::Behind { num_slots } + } + } + (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) => { + if latest_account_hash_slot.is_none() { + warn!("health check: latest_account_hash_slot not available"); + } + if latest_trusted_validator_account_hash_slot.is_none() { + warn!("health check: latest_trusted_validator_account_hash_slot not available"); + } + RpcHealthStatus::Unknown + } } } else { // No trusted validator point of reference available, so 
this validator is healthy diff --git a/core/src/rpc_pubsub.rs b/core/src/rpc_pubsub.rs index fe5fc2ca3f..ddd59d239d 100644 --- a/core/src/rpc_pubsub.rs +++ b/core/src/rpc_pubsub.rs @@ -25,7 +25,7 @@ use std::{ sync::{atomic, Arc}, }; -const MAX_ACTIVE_SUBSCRIPTIONS: usize = 100_000; +pub const MAX_ACTIVE_SUBSCRIPTIONS: usize = 100_000; // Suppress needless_return due to // https://github.com/paritytech/jsonrpc/blob/2d38e6424d8461cdf72e78425ce67d51af9c6586/derive/src/lib.rs#L204 @@ -210,26 +210,38 @@ pub trait RpcSolPubSub { pub struct RpcSolPubSubImpl { uid: Arc, subscriptions: Arc, + max_active_subscriptions: usize, } impl RpcSolPubSubImpl { - pub fn new(subscriptions: Arc) -> Self { + pub fn new(subscriptions: Arc, max_active_subscriptions: usize) -> Self { let uid = Arc::new(atomic::AtomicUsize::default()); - Self { uid, subscriptions } + Self { + uid, + subscriptions, + max_active_subscriptions, + } } #[cfg(test)] fn default_with_bank_forks(bank_forks: Arc>) -> Self { let uid = Arc::new(atomic::AtomicUsize::default()); let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); - Self { uid, subscriptions } + let max_active_subscriptions = MAX_ACTIVE_SUBSCRIPTIONS; + Self { + uid, + subscriptions, + max_active_subscriptions, + } } fn check_subscription_count(&self) -> Result<()> { let num_subscriptions = self.subscriptions.total(); debug!("Total existing subscriptions: {}", num_subscriptions); - if num_subscriptions >= MAX_ACTIVE_SUBSCRIPTIONS { + if num_subscriptions >= self.max_active_subscriptions { info!("Node subscription limit reached"); + datapoint_info!("rpc-subscription", ("total", num_subscriptions, i64)); + inc_new_counter_info!("rpc-subscription-refused-limit-reached", 1); Err(Error { code: ErrorCode::InternalError, message: "Internal Error: Subscription refused. 
Node subscription limit reached" @@ -237,6 +249,7 @@ impl RpcSolPubSubImpl { data: None, }) } else { + datapoint_info!("rpc-subscription", ("total", num_subscriptions + 1, i64)); Ok(()) } } @@ -606,9 +619,9 @@ mod tests { rpc_subscriptions::tests::robust_poll_or_panic, }; use crossbeam_channel::unbounded; - use jsonrpc_core::{futures::sync::mpsc, Response}; + use jsonrpc_core::{futures::channel::mpsc, Response}; use jsonrpc_pubsub::{PubSubHandler, Session}; - use serial_test_derive::serial; + use serial_test::serial; use solana_account_decoder::{parse_account_data::parse_account_data, UiAccountEncoding}; use solana_client::rpc_response::{ProcessedSignatureResult, ReceivedSignatureResult}; use solana_runtime::{ @@ -621,6 +634,7 @@ mod tests { }, }; use solana_sdk::{ + account::ReadableAccount, commitment_config::CommitmentConfig, hash::Hash, message::Message, @@ -661,7 +675,7 @@ mod tests { } fn create_session() -> Arc { - Arc::new(Session::new(mpsc::channel(1).0)) + Arc::new(Session::new(mpsc::unbounded().0)) } #[test] @@ -685,6 +699,7 @@ mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )), uid: Arc::new(atomic::AtomicUsize::default()), + max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, }; // Test signature subscriptions @@ -827,7 +842,7 @@ mod tests { // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[1]}"#; let res = io.handle_request_sync(&req, session); - let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid Request: Subscription id does not exist"},"id":1}"#; + let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; let expected: Response = serde_json::from_str(&expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); @@ -865,6 +880,7 @@ mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )), uid: Arc::new(atomic::AtomicUsize::default()), + 
max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, }; let session = create_session(); let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification"); @@ -901,14 +917,14 @@ mod tests { sleep(Duration::from_millis(200)); // Test signature confirmation notification #1 - let expected_data = bank_forks + let account = bank_forks .read() .unwrap() .get(1) .unwrap() .get_account(&stake_account.pubkey()) - .unwrap() - .data; + .unwrap(); + let expected_data = account.data(); let expected = json!({ "jsonrpc": "2.0", "method": "accountNotification", @@ -980,6 +996,7 @@ mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )), uid: Arc::new(atomic::AtomicUsize::default()), + max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, }; let session = create_session(); let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification"); @@ -1006,18 +1023,18 @@ mod tests { sleep(Duration::from_millis(200)); // Test signature confirmation notification #1 - let expected_data = bank_forks + let account = bank_forks .read() .unwrap() .get(1) .unwrap() .get_account(&nonce_account.pubkey()) - .unwrap() - .data; + .unwrap(); + let expected_data = account.data(); let expected_data = parse_account_data( &nonce_account.pubkey(), &system_program::id(), - &expected_data, + expected_data, None, ) .unwrap(); @@ -1074,7 +1091,7 @@ mod tests { // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[1]}"#; let res = io.handle_request_sync(&req, session); - let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid Request: Subscription id does not exist"},"id":1}"#; + let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; let expected: Response = serde_json::from_str(&expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); @@ -1335,14 +1352,16 @@ mod tests { }); // Process votes and 
check they were notified. - let (s, _r) = unbounded(); + let (verified_vote_sender, _verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded(); let (_replay_votes_sender, replay_votes_receiver) = unbounded(); ClusterInfoVoteListener::get_and_process_votes_for_tests( &votes_receiver, &vote_tracker, &bank, &rpc.subscriptions, - &s, + &gossip_verified_vote_hash_sender, + &verified_vote_sender, &replay_votes_receiver, ) .unwrap(); diff --git a/core/src/rpc_pubsub_service.rs b/core/src/rpc_pubsub_service.rs index 901e7616b0..8f11997fa0 100644 --- a/core/src/rpc_pubsub_service.rs +++ b/core/src/rpc_pubsub_service.rs @@ -1,7 +1,7 @@ //! The `pubsub` module implements a threaded subscription service on client RPC request use crate::{ - rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl}, + rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl, MAX_ACTIVE_SUBSCRIPTIONS}, rpc_subscriptions::RpcSubscriptions, }; use jsonrpc_pubsub::{PubSubHandler, Session}; @@ -27,6 +27,7 @@ pub struct PubSubConfig { pub max_fragment_size: usize, pub max_in_buffer_capacity: usize, pub max_out_buffer_capacity: usize, + pub max_active_subscriptions: usize, } impl Default for PubSubConfig { @@ -37,6 +38,7 @@ impl Default for PubSubConfig { max_fragment_size: 50 * 1024, // 50KB max_in_buffer_capacity: 50 * 1024, // 50KB max_out_buffer_capacity: 15 * 1024 * 1024, // max account size (10MB), then 5MB extra for base64 encoding overhead/etc + max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, } } } @@ -53,22 +55,12 @@ impl PubSubService { exit: &Arc, ) -> Self { info!("rpc_pubsub bound to {:?}", pubsub_addr); - let rpc = RpcSolPubSubImpl::new(subscriptions.clone()); + let rpc = RpcSolPubSubImpl::new( + subscriptions.clone(), + pubsub_config.max_active_subscriptions, + ); let exit_ = exit.clone(); - // TODO: Once https://github.com/paritytech/jsonrpc/pull/594 lands, use - // `ServerBuilder::max_in_buffer_capacity()` and 
`Server::max_out_buffer_capacity() methods - // instead of only `ServerBuilder::max_payload` - let max_payload = *[ - pubsub_config.max_fragment_size, - pubsub_config.max_in_buffer_capacity, - pubsub_config.max_out_buffer_capacity, - ] - .iter() - .max() - .unwrap(); - info!("rpc_pubsub max_payload: {}", max_payload); - let thread_hdl = Builder::new() .name("solana-pubsub".to_string()) .spawn(move || { @@ -84,7 +76,9 @@ impl PubSubService { session }) .max_connections(pubsub_config.max_connections) - .max_payload(max_payload) + .max_payload(pubsub_config.max_fragment_size) + .max_in_buffer_capacity(pubsub_config.max_in_buffer_capacity) + .max_out_buffer_capacity(pubsub_config.max_out_buffer_capacity) .start(&pubsub_addr); if let Err(e) = server { diff --git a/core/src/rpc_service.rs b/core/src/rpc_service.rs index e5e707193a..7036310ea8 100644 --- a/core/src/rpc_service.rs +++ b/core/src/rpc_service.rs @@ -6,13 +6,13 @@ use crate::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, poh_recorder::PohRecorder, - rpc::*, + rpc::{rpc_full::*, rpc_minimal::*, *}, rpc_health::*, send_transaction_service::{LeaderInfo, SendTransactionService}, validator::ValidatorExit, }; use evm_rpc::*; -use jsonrpc_core::MetaIoHandler; +use jsonrpc_core::{futures::prelude::*, MetaIoHandler}; use jsonrpc_http_server::{ hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware, RequestMiddlewareAction, ServerBuilder, @@ -36,6 +36,7 @@ use std::{ thread::{self, Builder, JoinHandle}, }; use tokio::runtime; +use tokio_util::codec::{BytesCodec, FramedRead}; const LARGEST_ACCOUNTS_CACHE_DURATION: u64 = 60 * 60 * 2; @@ -46,7 +47,6 @@ pub struct JsonRpcService { pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()... 
close_handle: Option, - runtime: runtime::Runtime, } struct RpcRequestMiddleware { @@ -112,10 +112,27 @@ impl RpcRequestMiddleware { } } - fn process_file_get(&self, path: &str) -> RequestMiddlewareAction { - // Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2 - use tokio_01::prelude::*; + #[cfg(unix)] + async fn open_no_follow(path: impl AsRef) -> std::io::Result { + // Stuck on tokio 0.2 until the jsonrpc crates upgrade + use tokio_02::fs::os::unix::OpenOptionsExt; + tokio_02::fs::OpenOptions::new() + .read(true) + .write(false) + .create(false) + .custom_flags(libc::O_NOFOLLOW) + .open(path) + .await + } + + #[cfg(not(unix))] + async fn open_no_follow(path: impl AsRef) -> std::io::Result { + // TODO: Is there any way to achieve the same on Windows? + // Stuck on tokio 0.2 until the jsonrpc crates upgrade + tokio_02::fs::File::open(path).await + } + fn process_file_get(&self, path: &str) -> RequestMiddlewareAction { let stem = path.split_at(1).1; // Drop leading '/' from path let filename = { match path { @@ -139,32 +156,31 @@ impl RpcRequestMiddleware { .unwrap_or(0) .to_string(); info!("get {} -> {:?} ({} bytes)", path, filename, file_length); - RequestMiddlewareAction::Respond { should_validate_hosts: true, - response: Box::new( - tokio_fs_01::file::File::open(filename) - .and_then(|file| { - use tokio_codec_01::{BytesCodec, FramedRead}; - - let stream = FramedRead::new(file, BytesCodec::new()) - .map(tokio_01_bytes::BytesMut::freeze); + response: Box::pin(async { + match Self::open_no_follow(filename).await { + Err(_) => Ok(Self::internal_server_error()), + Ok(file) => { + let stream = + FramedRead::new(file, BytesCodec::new()).map_ok(|b| b.freeze()); let body = hyper::Body::wrap_stream(stream); Ok(hyper::Response::builder() .header(hyper::header::CONTENT_LENGTH, file_length) .body(body) .unwrap()) - }) - .or_else(|_| Ok(RpcRequestMiddleware::not_found())), - ), + } + } + }), } } fn health_check(&self) -> &'static str { let 
response = match self.health.check() { RpcHealthStatus::Ok => "ok", - RpcHealthStatus::Behind { num_slots: _ } => "behind", + RpcHealthStatus::Behind { .. } => "behind", + RpcHealthStatus::Unknown => "unknown", }; info!("health check: {}", response); response @@ -178,57 +194,41 @@ impl RequestMiddleware for RpcRequestMiddleware { if let Some(ref snapshot_config) = self.snapshot_config { if request.uri().path() == "/snapshot.tar.bz2" { // Convenience redirect to the latest snapshot - return RequestMiddlewareAction::Respond { - should_validate_hosts: true, - response: Box::new(jsonrpc_core::futures::future::ok( - if let Some((snapshot_archive, _)) = - snapshot_utils::get_highest_snapshot_archive_path( - &snapshot_config.snapshot_package_output_path, - ) - { - RpcRequestMiddleware::redirect(&format!( - "/{}", - snapshot_archive - .file_name() - .unwrap_or_else(|| std::ffi::OsStr::new("")) - .to_str() - .unwrap_or(&"") - )) - } else { - RpcRequestMiddleware::not_found() - }, - )), - }; + return if let Some((snapshot_archive, _)) = + snapshot_utils::get_highest_snapshot_archive_path( + &snapshot_config.snapshot_package_output_path, + ) { + RpcRequestMiddleware::redirect(&format!( + "/{}", + snapshot_archive + .file_name() + .unwrap_or_else(|| std::ffi::OsStr::new("")) + .to_str() + .unwrap_or(&"") + )) + } else { + RpcRequestMiddleware::not_found() + } + .into(); } } if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) { - RequestMiddlewareAction::Respond { - should_validate_hosts: true, - response: Box::new(jsonrpc_core::futures::future::ok( - hyper::Response::builder() - .status(hyper::StatusCode::OK) - .body(hyper::Body::from(result)) - .unwrap(), - )), - } + hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from(result)) + .unwrap() + .into() } else if self.is_file_get_path(request.uri().path()) { self.process_file_get(request.uri().path()) } else if request.uri().path() == "/health" { - 
RequestMiddlewareAction::Respond { - should_validate_hosts: true, - response: Box::new(jsonrpc_core::futures::future::ok( - hyper::Response::builder() - .status(hyper::StatusCode::OK) - .body(hyper::Body::from(self.health_check())) - .unwrap(), - )), - } + hyper::Response::builder() + .status(hyper::StatusCode::OK) + .body(hyper::Body::from(self.health_check())) + .unwrap() + .into() } else { - RequestMiddlewareAction::Proceed { - should_continue_on_invalid_cors: false, - request, - } + request.into() } } } @@ -269,7 +269,7 @@ impl JsonRpcService { poh_recorder: Option>>, genesis_hash: Hash, ledger_path: &Path, - validator_exit: Arc>>, + validator_exit: Arc>, trusted_validators: Option>, override_health_check: Arc, optimistically_confirmed_bank: Arc>, @@ -295,12 +295,13 @@ impl JsonRpcService { ))); let tpu_address = cluster_info.my_contact_info().tpu; - let mut runtime = runtime::Builder::new() - .threaded_scheduler() - .thread_name("rpc-runtime") - .enable_all() - .build() - .expect("Runtime"); + let runtime = Arc::new( + runtime::Builder::new_multi_thread() + .thread_name("rpc-runtime") + .enable_all() + .build() + .expect("Runtime"), + ); let exit_bigtable_ledger_upload_service = Arc::new(AtomicBool::new(false)); @@ -317,7 +318,7 @@ impl JsonRpcService { let bigtable_ledger_upload_service = if config.enable_bigtable_ledger_upload { Some(Arc::new(BigTableUploadService::new( - runtime.handle().clone(), + runtime.clone(), bigtable_ledger_storage.clone(), blockstore.clone(), block_commitment_cache.clone(), @@ -340,6 +341,7 @@ impl JsonRpcService { (None, None) }; + let minimal_api = config.minimal_api; let (request_processor, receiver) = JsonRpcRequestProcessor::new( config, snapshot_config.clone(), @@ -350,7 +352,7 @@ impl JsonRpcService { health.clone(), cluster_info.clone(), genesis_hash, - &runtime, + runtime, bigtable_ledger_storage, optimistically_confirmed_bank, largest_accounts_cache, @@ -382,9 +384,12 @@ impl JsonRpcService { // so that we avoid the 
single-threaded event loops from being created automatically by // jsonrpc for threads when .threads(N > 1) is given. let event_loop = { - tokio_01::runtime::Builder::new() + // Stuck on tokio 0.2 until the jsonrpc crates upgrade + tokio_02::runtime::Builder::new() .core_threads(rpc_threads) - .name_prefix("sol-rpc-el") + .threaded_scheduler() + .enable_all() + .thread_name("sol-rpc-el") .build() .unwrap() }; @@ -394,12 +399,14 @@ impl JsonRpcService { .name("solana-jsonrpc".to_string()) .spawn(move || { let mut io = MetaIoHandler::default(); - let rpc = RpcSolImpl; - io.extend_with(rpc.to_delegate()); - let ether_basic = super::evm_rpc_impl::BasicErpcImpl; - io.extend_with(ether_basic.to_delegate()); - let chain_mock = super::evm_rpc_impl::ChainMockErpcImpl; - io.extend_with(chain_mock.to_delegate()); + + io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); + if !minimal_api { + io.extend_with(rpc_full::FullImpl.to_delegate()); + } + + io.extend_with(super::evm_rpc_impl::BasicErpcImpl.to_delegate()); + io.extend_with(super::evm_rpc_impl::ChainMockErpcImpl.to_delegate()); let request_middleware = RpcRequestMiddleware::new( ledger_path, @@ -411,7 +418,7 @@ impl JsonRpcService { io, move |_req: &hyper::Request| request_processor.clone(), ) - .event_loop_executor(event_loop.executor()) + .event_loop_executor(event_loop.handle().clone()) .threads(1) .cors(DomainsValidation::AllowOnly(vec![ AccessControlAllowOrigin::Any, @@ -440,14 +447,12 @@ impl JsonRpcService { let close_handle = close_handle_receiver.recv().unwrap(); let close_handle_ = close_handle.clone(); - let mut validator_exit_write = validator_exit.write().unwrap(); - validator_exit_write - .as_mut() + validator_exit + .write() .unwrap() .register_exit(Box::new(move || close_handle_.close())); Self { thread_hdl, - runtime, #[cfg(test)] request_processor: test_request_processor, close_handle: Some(close_handle), @@ -461,7 +466,6 @@ impl JsonRpcService { } pub fn join(self) -> thread::Result<()> { - 
self.runtime.shutdown_background(); self.thread_hdl.join() } } @@ -479,6 +483,7 @@ mod tests { }; use solana_runtime::{bank::Bank, bank_forks::ArchiveFormat, snapshot_utils::SnapshotVersion}; use solana_sdk::{genesis_config::ClusterType, signature::Signer}; + use std::io::Write; use std::net::{IpAddr, Ipv4Addr}; #[test] @@ -594,23 +599,83 @@ mod tests { assert!(rrm_with_snapshot_config.is_file_get_path( "/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.zst" )); - assert!(!rrm_with_snapshot_config.is_file_get_path( - "../snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.zst" - )); assert!(rrm_with_snapshot_config .is_file_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.gz")); assert!(rrm_with_snapshot_config .is_file_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar")); - assert!(!rrm.is_file_get_path( + assert!(!rrm_with_snapshot_config.is_file_get_path( "/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2" )); + assert!(!rrm_with_snapshot_config.is_file_get_path("../../../test/snapshot-123-xxx.tar")); + assert!(!rrm.is_file_get_path("/")); assert!(!rrm.is_file_get_path("..")); assert!(!rrm.is_file_get_path("🎣")); } + #[test] + fn test_process_file_get() { + let mut runtime = tokio_02::runtime::Runtime::new().unwrap(); + + let ledger_path = get_tmp_ledger_path!(); + std::fs::create_dir(&ledger_path).unwrap(); + + let genesis_path = ledger_path.join("genesis.tar.bz2"); + let rrm = RpcRequestMiddleware::new( + ledger_path.clone(), + None, + create_bank_forks(), + RpcHealth::stub(), + ); + + // File does not exist => request should fail. + let action = rrm.process_file_get("/genesis.tar.bz2"); + if let RequestMiddlewareAction::Respond { response, .. 
} = action { + let response = runtime.block_on(response); + let response = response.unwrap(); + assert_ne!(response.status(), 200); + } else { + panic!("Unexpected RequestMiddlewareAction variant"); + } + + { + let mut file = std::fs::File::create(&genesis_path).unwrap(); + file.write_all(b"should be ok").unwrap(); + } + + // Normal file exist => request should succeed. + let action = rrm.process_file_get("/genesis.tar.bz2"); + if let RequestMiddlewareAction::Respond { response, .. } = action { + let response = runtime.block_on(response); + let response = response.unwrap(); + assert_eq!(response.status(), 200); + } else { + panic!("Unexpected RequestMiddlewareAction variant"); + } + + #[cfg(unix)] + { + std::fs::remove_file(&genesis_path).unwrap(); + { + let mut file = std::fs::File::create(ledger_path.join("wrong")).unwrap(); + file.write_all(b"wrong file").unwrap(); + } + symlink::symlink_file("wrong", &genesis_path).unwrap(); + + // File is a symbolic link => request should fail. + let action = rrm.process_file_get("/genesis.tar.bz2"); + if let RequestMiddlewareAction::Respond { response, .. 
} = action { + let response = runtime.block_on(response); + let response = response.unwrap(); + assert_ne!(response.status(), 200); + } else { + panic!("Unexpected RequestMiddlewareAction variant"); + } + } + } + #[test] fn test_health_check_with_no_trusted_validators() { let rm = RpcRequestMiddleware::new( @@ -642,18 +707,20 @@ mod tests { let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health); - // No account hashes for this node or any trusted validators == "behind" - assert_eq!(rm.health_check(), "behind"); + // No account hashes for this node or any trusted validators + assert_eq!(rm.health_check(), "unknown"); - // No account hashes for any trusted validators == "behind" + // No account hashes for any trusted validators cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]); cluster_info.flush_push_queue(); - assert_eq!(rm.health_check(), "behind"); + assert_eq!(rm.health_check(), "unknown"); + + // Override health check override_health_check.store(true, Ordering::Relaxed); assert_eq!(rm.health_check(), "ok"); override_health_check.store(false, Ordering::Relaxed); - // This node is ahead of the trusted validators == "ok" + // This node is ahead of the trusted validators cluster_info .gossip .write() @@ -673,7 +740,7 @@ mod tests { .unwrap(); assert_eq!(rm.health_check(), "ok"); - // Node is slightly behind the trusted validators == "ok" + // Node is slightly behind the trusted validators cluster_info .gossip .write() @@ -689,7 +756,7 @@ mod tests { .unwrap(); assert_eq!(rm.health_check(), "ok"); - // Node is far behind the trusted validators == "behind" + // Node is far behind the trusted validators cluster_info .gossip .write() diff --git a/core/src/rpc_subscriptions.rs b/core/src/rpc_subscriptions.rs index 852feab960..3415773f09 100644 --- a/core/src/rpc_subscriptions.rs +++ b/core/src/rpc_subscriptions.rs @@ -6,7 +6,6 @@ use crate::{ }; use core::hash::Hash; use evm_rpc::Hex; -use 
jsonrpc_core::futures::Future; use jsonrpc_pubsub::{ typed::{Sink, Subscriber}, SubscriptionId, @@ -30,7 +29,7 @@ use solana_runtime::{ commitment::{BlockCommitmentCache, CommitmentSlots}, }; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, clock::{Slot, UnixTimestamp}, commitment_config::CommitmentConfig, pubkey::Pubkey, @@ -51,9 +50,6 @@ use std::{ time::Duration, }; -// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2 -use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor}; - const RECEIVE_DELAY_MILLIS: u64 = 100; trait BankGetTransactionLogsAdapter { @@ -273,20 +269,19 @@ where notified_set } -struct RpcNotifier(TaskExecutor); +struct RpcNotifier; impl RpcNotifier { fn notify(&self, value: T, sink: &Sink) where T: serde::Serialize, { - self.0 - .spawn(sink.notify(Ok(value)).map(|_| ()).map_err(|_| ())); + let _ = sink.notify(Ok(value)); } } fn filter_account_result( - result: Option<(Account, Slot)>, + result: Option<(AccountSharedData, Slot)>, pubkey: &Pubkey, last_notified_slot: Slot, encoding: Option, @@ -304,7 +299,7 @@ fn filter_account_result( Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))) } else { Box::new(iter::once(UiAccount::encode( - pubkey, account, encoding, None, None, + pubkey, &account, encoding, None, None, ))) } } else { @@ -330,7 +325,7 @@ fn filter_signature_result( } fn filter_program_results( - accounts: Vec<(Pubkey, Account)>, + accounts: Vec<(Pubkey, AccountSharedData)>, program_id: &Pubkey, last_notified_slot: Slot, config: Option, @@ -342,8 +337,8 @@ fn filter_program_results( let accounts_is_empty = accounts.is_empty(); let keyed_accounts = accounts.into_iter().filter(move |(_, account)| { filters.iter().all(|filter_type| match filter_type { - RpcFilterType::DataSize(size) => account.data.len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data), + RpcFilterType::DataSize(size) => 
account.data().len() as u64 == *size, + RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), }) }); let accounts: Box> = if program_id == &spl_token_id_v2_0() @@ -355,7 +350,7 @@ fn filter_program_results( Box::new( keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), - account: UiAccount::encode(&pubkey, account, encoding.clone(), None, None), + account: UiAccount::encode(&pubkey, &account, encoding, None, None), }), ) }; @@ -432,7 +427,6 @@ pub struct RpcSubscriptions { subscriptions: Subscriptions, notification_sender: Arc>>, t_cleanup: Option>, - notifier_runtime: Option, bank_forks: Arc>, block_commitment_cache: Arc>, optimistically_confirmed_bank: Arc>, @@ -511,13 +505,7 @@ impl RpcSubscriptions { }; let _subscriptions = subscriptions.clone(); - let notifier_runtime = RuntimeBuilder::new() - .core_threads(1) - .name_prefix("solana-rpc-notifier-") - .build() - .unwrap(); - - let notifier = RpcNotifier(notifier_runtime.executor()); + let notifier = RpcNotifier {}; let t_cleanup = Builder::new() .name("solana-rpc-notifications".to_string()) .spawn(move || { @@ -534,7 +522,6 @@ impl RpcSubscriptions { Self { subscriptions, notification_sender, - notifier_runtime: Some(notifier_runtime), t_cleanup: Some(t_cleanup), bank_forks, block_commitment_cache, @@ -1377,12 +1364,6 @@ impl RpcSubscriptions { } fn shutdown(&mut self) -> std::thread::Result<()> { - if let Some(runtime) = self.notifier_runtime.take() { - info!("RPC Notifier runtime - shutting down"); - let _ = runtime.shutdown_now().wait(); - info!("RPC Notifier runtime - shut down"); - } - if self.t_cleanup.is_some() { info!("RPC Notification thread - shutting down"); self.exit.store(true, Ordering::Relaxed); @@ -1402,9 +1383,9 @@ pub(crate) mod tests { use crate::optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker, }; - use jsonrpc_core::futures::{self, stream::Stream}; + use 
jsonrpc_core::futures::StreamExt; use jsonrpc_pubsub::typed::Subscriber; - use serial_test_derive::serial; + use serial_test::serial; use solana_runtime::{ commitment::BlockCommitment, genesis_utils::{create_genesis_config, GenesisConfigInfo}, @@ -1415,31 +1396,37 @@ pub(crate) mod tests { system_instruction, system_program, system_transaction, transaction::Transaction, }; - use std::{fmt::Debug, sync::mpsc::channel, time::Instant}; - use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay}; + use std::{fmt::Debug, sync::mpsc::channel}; + use tokio::{ + runtime::Runtime, + time::{sleep, timeout}, + }; pub(crate) fn robust_poll_or_panic( - receiver: futures::sync::mpsc::Receiver, - ) -> (T, futures::sync::mpsc::Receiver) { + receiver: jsonrpc_core::futures::channel::mpsc::UnboundedReceiver, + ) -> ( + T, + jsonrpc_core::futures::channel::mpsc::UnboundedReceiver, + ) { let (inner_sender, inner_receiver) = channel(); - let mut rt = Runtime::new().unwrap(); - rt.spawn(futures::lazy(|| { - let recv_timeout = receiver - .into_future() - .timeout(Duration::from_millis(RECEIVE_DELAY_MILLIS)) - .map(move |result| match result { - (Some(value), receiver) => { - inner_sender.send((value, receiver)).expect("send error") - } - (None, _) => panic!("unexpected end of stream"), - }) - .map_err(|err| panic!("stream error {:?}", err)); - - const INITIAL_DELAY_MS: u64 = RECEIVE_DELAY_MILLIS * 2; - Delay::new(Instant::now() + Duration::from_millis(INITIAL_DELAY_MS)) - .and_then(|_| recv_timeout) - .map_err(|err| panic!("timer error {:?}", err)) - })); + let rt = Runtime::new().unwrap(); + rt.spawn(async move { + let result = timeout( + Duration::from_millis(RECEIVE_DELAY_MILLIS), + receiver.into_future(), + ) + .await + .unwrap_or_else(|err| panic!("stream error {:?}", err)); + + match result { + (Some(value), receiver) => { + inner_sender.send((value, receiver)).expect("send error") + } + (None, _) => panic!("unexpected end of stream"), + } + + 
sleep(Duration::from_millis(RECEIVE_DELAY_MILLIS * 2)).await; + }); inner_receiver.recv().expect("recv error") } diff --git a/core/src/send_transaction_service.rs b/core/src/send_transaction_service.rs index 4de435bbeb..782f197af9 100644 --- a/core/src/send_transaction_service.rs +++ b/core/src/send_transaction_service.rs @@ -321,7 +321,7 @@ mod test { create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, }; use solana_sdk::{ - account::Account, + account::AccountSharedData, fee_calculator::FeeCalculator, genesis_config::create_genesis_config, nonce, @@ -331,7 +331,7 @@ mod test { system_program, system_transaction, timing::timestamp, }; - use std::sync::mpsc::channel; + use std::sync::{atomic::AtomicBool, mpsc::channel}; #[test] fn service_exit() { @@ -529,7 +529,8 @@ mod test { blockhash: durable_nonce, fee_calculator: FeeCalculator::new(42), })); - let nonce_account = Account::new_data(43, &nonce_state, &system_program::id()).unwrap(); + let nonce_account = + AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); root_bank.store_account(&nonce_address, &nonce_account); let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2)); @@ -753,7 +754,8 @@ mod test { blockhash: new_durable_nonce, fee_calculator: FeeCalculator::new(42), })); - let nonce_account = Account::new_data(43, &new_nonce_state, &system_program::id()).unwrap(); + let nonce_account = + AccountSharedData::new_data(43, &new_nonce_state, &system_program::id()).unwrap(); working_bank.store_account(&nonce_address, &nonce_account); let result = SendTransactionService::process_transactions( &working_bank, @@ -799,7 +801,7 @@ mod test { ); let bank = Arc::new(Bank::new(&genesis_config)); - let (poh_recorder, _entry_receiver) = PohRecorder::new( + let (poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, bank.last_blockhash(), 0, @@ -809,6 +811,7 @@ mod test { &Arc::new(blockstore), 
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)), &Arc::new(PohConfig::default()), + Arc::new(AtomicBool::default()), ); let node_keypair = Arc::new(Keypair::new()); diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 2a5e1b7fd3..4daeb56500 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -264,8 +264,10 @@ mod tests { &hasher, ); assert!(!packet.meta.discard); - - let coding = solana_ledger::shred::Shredder::generate_coding_shreds(1.0f32, &[shred], 1); + let coding = solana_ledger::shred::Shredder::generate_coding_shreds( + &[shred], + false, // is_last_in_slot + ); coding[0].copy_to_packet(&mut packet); ShredFetchStage::process_packet( &mut packet, diff --git a/core/src/test_validator.rs b/core/src/test_validator.rs index a629534814..674a5a3525 100644 --- a/core/src/test_validator.rs +++ b/core/src/test_validator.rs @@ -3,19 +3,21 @@ use { cluster_info::Node, gossip_service::discover_cluster, rpc::JsonRpcConfig, - validator::{Validator, ValidatorConfig}, + validator::{Validator, ValidatorConfig, ValidatorExit, ValidatorStartProgress}, }, solana_client::rpc_client::RpcClient, solana_ledger::{blockstore::create_new_ledger, create_new_tmp_ledger}, + solana_net_utils::PortRange, solana_runtime::{ bank_forks::{ArchiveFormat, SnapshotConfig, SnapshotVersion}, genesis_utils::create_genesis_config_with_leader_ex, hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, }, solana_sdk::{ - account::Account, + account::{Account, AccountSharedData}, clock::{Slot, DEFAULT_MS_PER_SLOT}, commitment_config::CommitmentConfig, + epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, hash::Hash, native_token::sol_to_lamports, @@ -27,8 +29,8 @@ use { collections::HashMap, fs::remove_dir_all, net::{IpAddr, Ipv4Addr, SocketAddr}, - path::PathBuf, - sync::Arc, + path::{Path, PathBuf}, + sync::{Arc, RwLock}, thread::sleep, time::Duration, }, @@ -41,6 +43,29 @@ pub struct ProgramInfo { pub program_path: 
PathBuf, } +#[derive(Debug)] +pub struct TestValidatorNodeConfig { + gossip_addr: SocketAddr, + port_range: PortRange, + bind_ip_addr: IpAddr, +} + +impl Default for TestValidatorNodeConfig { + fn default() -> Self { + const MIN_PORT_RANGE: u16 = 1024; + const MAX_PORT_RANGE: u16 = 65535; + + let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + let port_range = (MIN_PORT_RANGE, MAX_PORT_RANGE); + + Self { + gossip_addr: socketaddr!("127.0.0.1:0"), + port_range, + bind_ip_addr, + } + } +} + #[derive(Default)] pub struct TestValidatorGenesis { fee_rate_governor: FeeRateGovernor, @@ -50,8 +75,14 @@ pub struct TestValidatorGenesis { rpc_ports: Option<(u16, u16)>, // (JsonRpc, JsonRpcPubSub), None == random ports warp_slot: Option, no_bpf_jit: bool, - accounts: HashMap, + accounts: HashMap, programs: Vec, + epoch_schedule: Option, + node_config: TestValidatorNodeConfig, + pub validator_exit: Arc>, + pub start_progress: Arc>, + pub authorized_voter_keypairs: Arc>>>, + pub max_ledger_shreds: Option, } impl TestValidatorGenesis { @@ -60,11 +91,21 @@ impl TestValidatorGenesis { self } + /// Check if a given TestValidator ledger has already been initialized + pub fn ledger_exists(ledger_path: &Path) -> bool { + ledger_path.join("vote-account-keypair.json").exists() + } + pub fn fee_rate_governor(&mut self, fee_rate_governor: FeeRateGovernor) -> &mut Self { self.fee_rate_governor = fee_rate_governor; self } + pub fn epoch_schedule(&mut self, epoch_schedule: EpochSchedule) -> &mut Self { + self.epoch_schedule = Some(epoch_schedule); + self + } + pub fn rent(&mut self, rent: Rent) -> &mut Self { self.rent = rent; self @@ -95,15 +136,35 @@ impl TestValidatorGenesis { self } + pub fn gossip_host(&mut self, gossip_host: IpAddr) -> &mut Self { + self.node_config.gossip_addr.set_ip(gossip_host); + self + } + + pub fn gossip_port(&mut self, gossip_port: u16) -> &mut Self { + self.node_config.gossip_addr.set_port(gossip_port); + self + } + + pub fn port_range(&mut self, 
port_range: PortRange) -> &mut Self { + self.node_config.port_range = port_range; + self + } + + pub fn bind_ip_addr(&mut self, bind_ip_addr: IpAddr) -> &mut Self { + self.node_config.bind_ip_addr = bind_ip_addr; + self + } + /// Add an account to the test environment - pub fn add_account(&mut self, address: Pubkey, account: Account) -> &mut Self { + pub fn add_account(&mut self, address: Pubkey, account: AccountSharedData) -> &mut Self { self.accounts.insert(address, account); self } pub fn add_accounts(&mut self, accounts: T) -> &mut Self where - T: IntoIterator, + T: IntoIterator, { for (address, account) in accounts { self.add_account(address, account); @@ -121,7 +182,7 @@ impl TestValidatorGenesis { error!("Failed to fetch {}: {}", address, err); crate::validator::abort(); }); - self.add_account(address, account); + self.add_account(address, AccountSharedData::from(account)); } self } @@ -136,7 +197,7 @@ impl TestValidatorGenesis { ) -> &mut Self { self.add_account( address, - Account { + AccountSharedData::from(Account { lamports, data: solana_program_test::read_file( solana_program_test::find_file(filename).unwrap_or_else(|| { @@ -146,7 +207,7 @@ impl TestValidatorGenesis { owner, executable: false, rent_epoch: 0, - }, + }), ) } @@ -161,14 +222,14 @@ impl TestValidatorGenesis { ) -> &mut Self { self.add_account( address, - Account { + AccountSharedData::from(Account { lamports, data: base64::decode(data_base64) .unwrap_or_else(|err| panic!("Failed to base64 decode: {}", err)), owner, executable: false, rent_epoch: 0, - }, + }), ) } @@ -296,13 +357,13 @@ impl TestValidator { let data = solana_program_test::read_file(&program.program_path); accounts.insert( program.program_id, - Account { + AccountSharedData::from(Account { lamports: Rent::default().minimum_balance(data.len()).min(1), data, owner: program.loader, executable: true, rent_epoch: 0, - }, + }), ); } @@ -319,12 +380,14 @@ impl TestValidator { solana_sdk::genesis_config::ClusterType::Development, 
accounts.into_iter().collect(), ); - genesis_config.epoch_schedule = solana_sdk::epoch_schedule::EpochSchedule::without_warmup(); + genesis_config.epoch_schedule = config + .epoch_schedule + .unwrap_or_else(EpochSchedule::without_warmup); let ledger_path = match &config.ledger_path { None => create_new_tmp_ledger!(&genesis_config).0, Some(ledger_path) => { - if ledger_path.join("validator-keypair.json").exists() { + if TestValidatorGenesis::ledger_exists(ledger_path) { return Ok(ledger_path.to_path_buf()); } @@ -350,6 +413,10 @@ impl TestValidator { &validator_identity, ledger_path.join("validator-keypair.json").to_str().unwrap(), )?; + + // `ledger_exists` should fail until the vote account keypair is written + assert!(!TestValidatorGenesis::ledger_exists(&ledger_path)); + write_keypair_file( &validator_vote_account, ledger_path @@ -378,7 +445,12 @@ impl TestValidator { .unwrap(), )?; - let mut node = Node::new_localhost_with_pubkey(&validator_identity.pubkey()); + let mut node = Node::new_single_bind( + &validator_identity.pubkey(), + &config.node_config.gossip_addr, + config.node_config.port_range, + config.node_config.bind_ip_addr, + ); if let Some((rpc, rpc_pubsub)) = config.rpc_ports { node.info.rpc = SocketAddr::new(node.info.gossip.ip(), rpc); node.info.rpc_pubsub = SocketAddr::new(node.info.gossip.ip(), rpc_pubsub); @@ -393,6 +465,16 @@ impl TestValidator { let mut rpc_config = config.rpc_config.clone(); rpc_config.identity_pubkey = validator_identity.pubkey(); + { + let mut authorized_voter_keypairs = config.authorized_voter_keypairs.write().unwrap(); + if !authorized_voter_keypairs + .iter() + .any(|x| x.pubkey() == vote_account_address) + { + authorized_voter_keypairs.push(Arc::new(validator_vote_account)) + } + } + let validator_config = ValidatorConfig { rpc_addrs: Some(( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), node.info.rpc.port()), @@ -415,6 +497,10 @@ impl TestValidator { enforce_ulimit_nofile: false, warp_slot: config.warp_slot, 
bpf_jit: !config.no_bpf_jit, + validator_exit: config.validator_exit.clone(), + rocksdb_compaction_interval: Some(100), // Compact every 100 slots + max_ledger_shreds: config.max_ledger_shreds, + no_wait_for_vote_to_start_leader: true, ..ValidatorConfig::default() }; @@ -422,16 +508,18 @@ impl TestValidator { node, &Arc::new(validator_identity), &ledger_path, - &validator_vote_account.pubkey(), - vec![Arc::new(validator_vote_account)], + &vote_account_address, + config.authorized_voter_keypairs.clone(), vec![], &validator_config, true, // should_check_duplicate_instance + config.start_progress.clone(), )); // Needed to avoid panics in `solana-responder-gossip` in tests that create a number of // test validators concurrently... - discover_cluster(&gossip, 1).expect("TestValidator startup failed"); + discover_cluster(&gossip, 1) + .map_err(|err| format!("TestValidator startup failed: {:?}", err))?; // This is a hack to delay until the fees are non-zero for test consistency // (fees from genesis are zero until the first block with a transaction in it is completed @@ -439,19 +527,24 @@ impl TestValidator { { let rpc_client = RpcClient::new_with_commitment(rpc_url.clone(), CommitmentConfig::processed()); - let fee_rate_governor = rpc_client - .get_fee_rate_governor() - .expect("get_fee_rate_governor") - .value; - if fee_rate_governor.target_lamports_per_signature > 0 { - while rpc_client - .get_recent_blockhash() - .expect("get_recent_blockhash") - .1 - .lamports_per_signature - == 0 - { - sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); + + if let Ok(result) = rpc_client.get_fee_rate_governor() { + let fee_rate_governor = result.value; + if fee_rate_governor.target_lamports_per_signature > 0 { + loop { + match rpc_client.get_recent_blockhash() { + Ok((_blockhash, fee_calculator)) => { + if fee_calculator.lamports_per_signature != 0 { + break; + } + } + Err(err) => { + warn!("get_recent_blockhash() failed: {:?}", err); + break; + } + } + 
sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)); + } } } } @@ -504,6 +597,12 @@ impl TestValidator { (rpc_client, recent_blockhash, fee_calculator) } + + pub fn join(mut self) { + if let Some(validator) = self.validator.take() { + validator.join(); + } + } } impl Drop for TestValidator { diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 4b463cd364..600fb891e6 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -5,7 +5,10 @@ use crate::{ banking_stage::BankingStage, broadcast_stage::{BroadcastStage, BroadcastStageType, RetransmitSlotsReceiver}, cluster_info::ClusterInfo, - cluster_info_vote_listener::{ClusterInfoVoteListener, VerifiedVoteSender, VoteTracker}, + cluster_info_vote_listener::{ + ClusterInfoVoteListener, GossipDuplicateConfirmedSlotsSender, GossipVerifiedVoteHashSender, + VerifiedVoteSender, VoteTracker, + }, fetch_stage::FetchStage, optimistically_confirmed_bank_tracker::BankNotificationSender, poh_recorder::{PohRecorder, WorkingBankEntry}, @@ -58,10 +61,12 @@ impl Tpu { vote_tracker: Arc, bank_forks: Arc>, verified_vote_sender: VerifiedVoteSender, + gossip_verified_vote_hash_sender: GossipVerifiedVoteHashSender, replay_vote_receiver: ReplayVoteReceiver, replay_vote_sender: ReplayVoteSender, bank_notification_sender: Option, tpu_coalesce_ms: u64, + cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, ) -> Self { let (packet_sender, packet_receiver) = channel(); let fetch_stage = FetchStage::new_with_sender( @@ -92,9 +97,11 @@ impl Tpu { bank_forks, subscriptions.clone(), verified_vote_sender, + gossip_verified_vote_hash_sender, replay_vote_receiver, blockstore.clone(), bank_notification_sender, + cluster_confirmed_slot_sender, ); let banking_stage = BankingStage::new( diff --git a/core/src/transaction_status_service.rs b/core/src/transaction_status_service.rs index b769ede37d..148648b39f 100644 --- a/core/src/transaction_status_service.rs +++ b/core/src/transaction_status_service.rs @@ -7,7 +7,7 @@ use solana_ledger::{ use 
solana_runtime::bank::{ Bank, InnerInstructionsList, NonceRollbackInfo, TransactionLogMessages, }; -use solana_transaction_status::{InnerInstructions, TransactionStatusMeta}; +use solana_transaction_status::{InnerInstructions, Reward, TransactionStatusMeta}; use std::{ sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -62,6 +62,7 @@ impl TransactionStatusService { token_balances, inner_instructions, transaction_logs, + rent_debits, }) => { let slot = bank.slot(); let inner_instructions_iter: Box< @@ -86,6 +87,7 @@ impl TransactionStatusService { post_token_balances, inner_instructions, log_messages, + rent_debits, ) in izip!( &transactions, statuses, @@ -94,7 +96,8 @@ impl TransactionStatusService { token_balances.pre_token_balances, token_balances.post_token_balances, inner_instructions_iter, - transaction_logs_iter + transaction_logs_iter, + rent_debits.into_iter(), ) { if Bank::can_commit(&status) && !transaction.signatures.is_empty() { let fee_calculator = nonce_rollback @@ -104,8 +107,9 @@ impl TransactionStatusService { }) .expect("FeeCalculator must exist"); let fee = fee_calculator.calculate_fee(transaction.message()); - let (writable_keys, readonly_keys) = - transaction.message.get_account_keys_by_lock_type(); + let (writable_keys, readonly_keys) = transaction + .message + .get_account_keys_by_lock_type(bank.demote_sysvar_write_locks()); let inner_instructions = inner_instructions.map(|inner_instructions| { inner_instructions @@ -122,6 +126,18 @@ impl TransactionStatusService { let log_messages = Some(log_messages); let pre_token_balances = Some(pre_token_balances); let post_token_balances = Some(post_token_balances); + let rewards = Some( + rent_debits + .0 + .into_iter() + .map(|(pubkey, reward_info)| Reward { + pubkey: pubkey.to_string(), + lamports: reward_info.lamports, + post_balance: reward_info.post_balance, + reward_type: Some(reward_info.reward_type), + }) + .collect(), + ); blockstore .write_transaction_status( @@ -138,6 +154,7 @@ impl 
TransactionStatusService { log_messages, pre_token_balances, post_token_balances, + rewards, }, ) .expect("Expect database write to succeed"); diff --git a/core/src/tree_diff.rs b/core/src/tree_diff.rs index bad6d31437..358c49d4ed 100644 --- a/core/src/tree_diff.rs +++ b/core/src/tree_diff.rs @@ -1,30 +1,30 @@ -use solana_sdk::clock::Slot; -use std::collections::HashSet; +use std::{collections::HashSet, hash::Hash}; pub trait TreeDiff { - fn children(&self, slot: Slot) -> Option<&[Slot]>; + type TreeKey: Hash + PartialEq + Eq + Copy; + fn children(&self, key: &Self::TreeKey) -> Option<&[Self::TreeKey]>; - fn contains_slot(&self, slot: Slot) -> bool; + fn contains_slot(&self, slot: &Self::TreeKey) -> bool; // Find all nodes reachable from `root1`, excluding subtree at `root2` - fn subtree_diff(&self, root1: Slot, root2: Slot) -> HashSet { - if !self.contains_slot(root1) { + fn subtree_diff(&self, root1: Self::TreeKey, root2: Self::TreeKey) -> HashSet { + if !self.contains_slot(&root1) { return HashSet::new(); } - let mut pending_slots = vec![root1]; + let mut pending_keys = vec![root1]; let mut reachable_set = HashSet::new(); - while !pending_slots.is_empty() { - let current_slot = pending_slots.pop().unwrap(); - if current_slot == root2 { + while !pending_keys.is_empty() { + let current_key = pending_keys.pop().unwrap(); + if current_key == root2 { continue; } - reachable_set.insert(current_slot); for child in self - .children(current_slot) + .children(¤t_key) .expect("slot was discovered earlier, must exist") { - pending_slots.push(*child); + pending_keys.push(*child); } + reachable_set.insert(current_key); } reachable_set diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 6189acd51a..8e05b01e45 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -4,9 +4,12 @@ use crate::{ accounts_hash_verifier::AccountsHashVerifier, broadcast_stage::RetransmitSlotsSender, - cache_block_time_service::CacheBlockTimeSender, + cache_block_meta_service::CacheBlockMetaSender, 
cluster_info::ClusterInfo, - cluster_info_vote_listener::{VerifiedVoteReceiver, VoteTracker}, + cluster_info_vote_listener::{ + GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, + VerifiedVoteReceiver, VoteTracker, + }, cluster_slots::ClusterSlots, completed_data_sets_service::CompletedDataSetsSender, consensus::Tower, @@ -85,6 +88,7 @@ pub struct TvuConfig { pub use_index_hash_calculation: bool, pub rocksdb_compaction_interval: Option, pub rocksdb_max_compaction_jitter: Option, + pub wait_for_vote_to_start_leader: bool, } impl Tvu { @@ -97,7 +101,7 @@ impl Tvu { #[allow(clippy::new_ret_no_self, clippy::too_many_arguments)] pub fn new( vote_account: &Pubkey, - authorized_voter_keypairs: Vec>, + authorized_voter_keypairs: Arc>>>, bank_forks: &Arc>, cluster_info: &Arc, sockets: Sockets, @@ -113,15 +117,17 @@ impl Tvu { cfg: Option>, transaction_status_sender: Option, rewards_recorder_sender: Option, - cache_block_time_sender: Option, + cache_block_meta_sender: Option, evm_block_recorder_sender: Option, snapshot_config_and_pending_package: Option<(SnapshotConfig, PendingSnapshotPackage)>, vote_tracker: Arc, retransmit_slots_sender: RetransmitSlotsSender, + gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver, verified_vote_receiver: VerifiedVoteReceiver, replay_vote_sender: ReplayVoteSender, completed_data_sets_sender: CompletedDataSetsSender, bank_notification_sender: Option, + gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver, tvu_config: TvuConfig, max_slots: &Arc, ) -> Self { @@ -161,6 +167,7 @@ impl Tvu { let (duplicate_slots_reset_sender, duplicate_slots_reset_receiver) = unbounded(); let compaction_interval = tvu_config.rocksdb_compaction_interval; let max_compaction_jitter = tvu_config.rocksdb_max_compaction_jitter; + let (duplicate_slots_sender, duplicate_slots_receiver) = unbounded(); let retransmit_stage = RetransmitStage::new( bank_forks.clone(), leader_schedule_cache, @@ -181,6 +188,7 @@ impl Tvu { 
completed_data_sets_sender, max_slots, Some(subscriptions.clone()), + duplicate_slots_sender, ); let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel(); @@ -254,9 +262,10 @@ impl Tvu { block_commitment_cache, transaction_status_sender, rewards_recorder_sender, - cache_block_time_sender, + cache_block_meta_sender, evm_block_recorder_sender, bank_notification_sender, + wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader, }; let replay_stage = ReplayStage::new( @@ -265,6 +274,7 @@ impl Tvu { bank_forks.clone(), cluster_info.clone(), ledger_signal_receiver, + duplicate_slots_receiver, poh_recorder.clone(), tower, vote_tracker, @@ -272,6 +282,8 @@ impl Tvu { retransmit_slots_sender, duplicate_slots_reset_receiver, replay_vote_sender, + gossip_confirmed_slots_receiver, + gossip_verified_vote_hash_receiver, ); let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { @@ -327,7 +339,7 @@ pub mod tests { cluster_info::{ClusterInfo, Node}, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, }; - use serial_test_derive::serial; + use serial_test::serial; use solana_ledger::{ blockstore::BlockstoreSignals, create_new_tmp_ledger, @@ -371,14 +383,16 @@ pub mod tests { let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded(); + let (_gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); let (_verified_vote_sender, verified_vote_receiver) = unbounded(); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); let (completed_data_sets_sender, _completed_data_sets_receiver) = unbounded(); + let (_, gossip_confirmed_slots_receiver) = unbounded(); let bank_forks = Arc::new(RwLock::new(bank_forks)); let tower = Tower::new_with_key(&target1_keypair.pubkey()); let tvu = Tvu::new( 
&vote_keypair.pubkey(), - vec![Arc::new(vote_keypair)], + Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])), &bank_forks, &cref1, { @@ -411,10 +425,12 @@ pub mod tests { None, Arc::new(VoteTracker::new(&bank)), retransmit_slots_sender, + gossip_verified_vote_hash_receiver, verified_vote_receiver, replay_vote_sender, completed_data_sets_sender, None, + gossip_confirmed_slots_receiver, TvuConfig::default(), &Arc::new(MaxSlots::default()), ); diff --git a/core/src/unfrozen_gossip_verified_vote_hashes.rs b/core/src/unfrozen_gossip_verified_vote_hashes.rs new file mode 100644 index 0000000000..4640e01e72 --- /dev/null +++ b/core/src/unfrozen_gossip_verified_vote_hashes.rs @@ -0,0 +1,132 @@ +use crate::latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks; +use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}; +use std::collections::{BTreeMap, HashMap}; + +#[derive(Default)] +pub(crate) struct UnfrozenGossipVerifiedVoteHashes { + pub votes_per_slot: BTreeMap>>, +} + +impl UnfrozenGossipVerifiedVoteHashes { + // Update `latest_validator_votes_for_frozen_banks` if gossip has seen a newer vote + // for a frozen bank. 
+ #[allow(dead_code)] + pub(crate) fn add_vote( + &mut self, + pubkey: Pubkey, + vote_slot: Slot, + hash: Hash, + is_frozen: bool, + latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, + ) { + // If this is a frozen bank, then we need to update the `latest_validator_votes_for_frozen_banks` + let frozen_hash = if is_frozen { Some(hash) } else { None }; + let (was_added, latest_frozen_vote_slot) = latest_validator_votes_for_frozen_banks + .check_add_vote(pubkey, vote_slot, frozen_hash, false); + + if !was_added + && latest_frozen_vote_slot + .map(|latest_frozen_vote_slot| vote_slot >= latest_frozen_vote_slot) + // If there's no latest frozen vote slot yet, then we should also insert + .unwrap_or(true) + { + // At this point it must be that: + // 1) `vote_slot` was not yet frozen + // 2) and `vote_slot` >= than the latest frozen vote slot. + + // Thus we want to record this vote for later, in case a slot with this `vote_slot` + hash gets + // frozen later + self.votes_per_slot + .entry(vote_slot) + .or_default() + .entry(hash) + .or_default() + .push(pubkey); + } + } + + // Cleanup `votes_per_slot` based on new roots + pub(crate) fn set_root(&mut self, new_root: Slot) { + let mut slots_ge_root = self.votes_per_slot.split_off(&new_root); + // `self.votes_per_slot` now only contains entries >= `new_root` + std::mem::swap(&mut self.votes_per_slot, &mut slots_ge_root); + } + + pub(crate) fn remove_slot_hash(&mut self, slot: Slot, hash: &Hash) -> Option> { + self.votes_per_slot.get_mut(&slot).and_then(|slot_hashes| { + slot_hashes.remove(hash) + // If `slot_hashes` becomes empty, it'll be removed by `set_root()` later + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_unfrozen_gossip_verified_vote_hashes_add_vote() { + let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); + let mut latest_validator_votes_for_frozen_banks = + LatestValidatorVotesForFrozenBanks::default(); + let 
num_validators = 10; + let validator_keys: Vec = std::iter::repeat_with(Pubkey::new_unique) + .take(num_validators) + .collect(); + + // Case 1: Frozen banks shouldn't be added + let frozen_vote_slot = 1; + let num_repeated_iterations = 10; + for _ in 0..num_repeated_iterations { + let hash = Hash::new_unique(); + let is_frozen = true; + for vote_pubkey in validator_keys.iter() { + unfrozen_gossip_verified_vote_hashes.add_vote( + *vote_pubkey, + frozen_vote_slot, + hash, + is_frozen, + &mut latest_validator_votes_for_frozen_banks, + ); + } + + assert!(unfrozen_gossip_verified_vote_hashes + .votes_per_slot + .is_empty()); + } + + // Case 2: Other >= non-frozen banks should be added in case they're frozen later + for unfrozen_vote_slot in &[frozen_vote_slot - 1, frozen_vote_slot, frozen_vote_slot + 1] { + // If the vote slot is smaller than the latest known frozen `vote_slot` + // for each pubkey (which was added above), then they shouldn't be added + let num_duplicate_hashes = 10; + for _ in 0..num_duplicate_hashes { + let hash = Hash::new_unique(); + let is_frozen = false; + for vote_pubkey in validator_keys.iter() { + unfrozen_gossip_verified_vote_hashes.add_vote( + *vote_pubkey, + *unfrozen_vote_slot, + hash, + is_frozen, + &mut latest_validator_votes_for_frozen_banks, + ); + } + } + if *unfrozen_vote_slot >= frozen_vote_slot { + let vote_hashes_map = unfrozen_gossip_verified_vote_hashes + .votes_per_slot + .get(&unfrozen_vote_slot) + .unwrap(); + assert_eq!(vote_hashes_map.len(), num_duplicate_hashes); + for pubkey_votes in vote_hashes_map.values() { + assert_eq!(*pubkey_votes, validator_keys); + } + } else { + assert!(unfrozen_gossip_verified_vote_hashes + .votes_per_slot + .is_empty()); + } + } + } +} diff --git a/core/src/validator.rs b/core/src/validator.rs index af8e4e633a..13a0ed49f9 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -2,7 +2,7 @@ use crate::{ broadcast_stage::BroadcastStageType, - 
cache_block_time_service::{CacheBlockTimeSender, CacheBlockTimeService}, + cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService}, cluster_info::{ ClusterInfo, Node, DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS, DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS, @@ -47,7 +47,7 @@ use solana_ledger::{ use solana_measure::measure::Measure; use solana_metrics::datapoint_info; use solana_runtime::{ - accounts_index::AccountIndex, + accounts_index::AccountSecondaryIndexes, bank::Bank, bank_forks::{BankForks, SnapshotConfig}, commitment::BlockCommitmentCache, @@ -67,20 +67,21 @@ use solana_vote_program::vote_state::VoteState; use std::time::Instant; use std::{ collections::HashSet, + fmt, net::SocketAddr, ops::Deref, path::{Path, PathBuf}, sync::atomic::{AtomicBool, AtomicU64, Ordering}, sync::mpsc::Receiver, sync::{Arc, Mutex, RwLock}, - thread::sleep, + thread::{sleep, Builder}, time::Duration, }; const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000; const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 90; -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct ValidatorConfig { pub dev_halt_at_slot: Option, pub expected_genesis_hash: Option, @@ -115,6 +116,7 @@ pub struct ValidatorConfig { pub poh_verify: bool, // Perform PoH verification during blockstore processing at boo pub cuda: bool, pub require_tower: bool, + pub tower_path: Option, pub debug_keys: Option>>, pub contact_debug_interval: u64, pub contact_save_interval: u64, @@ -124,12 +126,14 @@ pub struct ValidatorConfig { pub no_poh_speed_test: bool, pub poh_pinned_cpu_core: usize, pub poh_hashes_per_batch: u64, - pub account_indexes: HashSet, + pub account_indexes: AccountSecondaryIndexes, pub accounts_db_caching_enabled: bool, pub warp_slot: Option, pub accounts_db_test_hash_calculation: bool, pub accounts_db_use_index_hash_calculation: bool, pub tpu_coalesce_ms: u64, + pub validator_exit: Arc>, + pub no_wait_for_vote_to_start_leader: bool, } impl Default for ValidatorConfig { @@ -159,7 +163,7 @@ impl 
Default for ValidatorConfig { halt_on_trusted_validators_accounts_hash_mismatch: false, accounts_hash_fault_injection_slots: 0, frozen_accounts: vec![], - no_rocksdb_compaction: false, + no_rocksdb_compaction: true, rocksdb_compaction_interval: None, rocksdb_max_compaction_jitter: None, accounts_hash_interval_slots: std::u64::MAX, @@ -168,6 +172,7 @@ impl Default for ValidatorConfig { poh_verify: true, cuda: false, require_tower: false, + tower_path: None, debug_keys: None, contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS, contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS, @@ -177,33 +182,73 @@ impl Default for ValidatorConfig { no_poh_speed_test: true, poh_pinned_cpu_core: poh_service::DEFAULT_PINNED_CPU_CORE, poh_hashes_per_batch: poh_service::DEFAULT_HASHES_PER_BATCH, - account_indexes: HashSet::new(), + account_indexes: AccountSecondaryIndexes::default(), accounts_db_caching_enabled: false, warp_slot: None, accounts_db_test_hash_calculation: false, accounts_db_use_index_hash_calculation: true, tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS, + validator_exit: Arc::new(RwLock::new(ValidatorExit::default())), + no_wait_for_vote_to_start_leader: true, } } } +// `ValidatorStartProgress` contains status information that is surfaced to the node operator over +// the admin RPC channel to help them to follow the general progress of node startup without +// having to watch log messages. 
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +pub enum ValidatorStartProgress { + Initializing, // Catch all, default state + SearchingForRpcService, + DownloadingSnapshot { slot: Slot, rpc_addr: SocketAddr }, + CleaningBlockStore, + CleaningAccounts, + LoadingLedger, + StartingServices, + Halted, // Validator halted due to `--dev-halt-at-slot` argument + WaitingForSupermajority, + + // `Running` is the terminal state once the validator fully starts and all services are + // operational + Running, +} + +impl Default for ValidatorStartProgress { + fn default() -> Self { + Self::Initializing + } +} + #[derive(Default)] pub struct ValidatorExit { + exited: bool, exits: Vec>, } impl ValidatorExit { pub fn register_exit(&mut self, exit: Box) { - self.exits.push(exit); + if self.exited { + exit(); + } else { + self.exits.push(exit); + } } - pub fn exit(self) { - for exit in self.exits { + pub fn exit(&mut self) { + self.exited = true; + for exit in self.exits.drain(..) { exit(); } } } +impl fmt::Debug for ValidatorExit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} exits", self.exits.len()) + } +} + #[derive(Default)] struct TransactionHistoryServices { transaction_status_sender: Option, @@ -211,31 +256,20 @@ struct TransactionHistoryServices { max_complete_transaction_status_slot: Arc, rewards_recorder_sender: Option, rewards_recorder_service: Option, - cache_block_time_sender: Option, - cache_block_time_service: Option, + cache_block_meta_sender: Option, + cache_block_meta_service: Option, evm_block_recorder_sender: Option, evm_block_recorder_service: Option, - // TODO: - // 1. EvmBlock header - // 2. EvmTransaction Statuses (tx receip by hash) - // 3. EvmTransaction Trace api - // 4. Add link SolanaTx -> EvmTx - // 5. link from EvmTx back to SolanaTx? 
-} - -struct RpcServices { - json_rpc_service: JsonRpcService, - pubsub_service: PubSubService, - optimistically_confirmed_bank_tracker: OptimisticallyConfirmedBankTracker, } pub struct Validator { - pub id: Pubkey, - validator_exit: Arc>>, - rpc_service: Option, + validator_exit: Arc>, + json_rpc_service: Option, + pubsub_service: Option, + optimistically_confirmed_bank_tracker: Option, transaction_status_service: Option, rewards_recorder_service: Option, - cache_block_time_service: Option, + cache_block_meta_service: Option, sample_performance_service: Option, evm_block_recorder_service: Option, gossip_service: GossipService, @@ -246,7 +280,7 @@ pub struct Validator { poh_service: PohService, tpu: Tpu, tvu: Tvu, - ip_echo_server: solana_net_utils::IpEchoServer, + ip_echo_server: Option, } // in the distant future, get rid of ::new()/exit() and use Result properly... @@ -269,10 +303,11 @@ impl Validator { identity_keypair: &Arc, ledger_path: &Path, vote_account: &Pubkey, - mut authorized_voter_keypairs: Vec>, + mut authorized_voter_keypairs: Arc>>>, cluster_entrypoints: Vec, config: &ValidatorConfig, should_check_duplicate_instance: bool, + start_progress: Arc>, ) -> Self { let id = identity_keypair.pubkey(); assert_eq!(id, node.info.id); @@ -282,12 +317,13 @@ impl Validator { if config.voting_disabled { warn!("voting disabled"); - authorized_voter_keypairs.clear(); + authorized_voter_keypairs.write().unwrap().clear(); } else { - for authorized_voter_keypair in &authorized_voter_keypairs { + for authorized_voter_keypair in authorized_voter_keypairs.read().unwrap().iter() { warn!("authorized voter: {}", authorized_voter_keypair.pubkey()); } } + report_target_features(); for cluster_entrypoint in &cluster_entrypoints { @@ -312,6 +348,7 @@ impl Validator { if let Some(shred_version) = config.expected_shred_version { if let Some(wait_for_supermajority_slot) = config.wait_for_supermajority { + *start_progress.write().unwrap() = 
ValidatorStartProgress::CleaningBlockStore; backup_and_clear_blockstore( ledger_path, wait_for_supermajority_slot + 1, @@ -321,6 +358,7 @@ impl Validator { } info!("Cleaning accounts paths.."); + *start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts; let mut start = Measure::start("clean_accounts_paths"); for accounts_path in &config.account_paths { cleanup_accounts_path(accounts_path); @@ -333,11 +371,15 @@ impl Validator { start.stop(); info!("done. {}", start); - let mut validator_exit = ValidatorExit::default(); let exit = Arc::new(AtomicBool::new(false)); - let exit_ = exit.clone(); - validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed))); - let validator_exit = Arc::new(RwLock::new(Some(validator_exit))); + { + let exit = exit.clone(); + config + .validator_exit + .write() + .unwrap() + .register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); + } let (replay_vote_sender, replay_vote_receiver) = unbounded(); let ( @@ -354,8 +396,8 @@ impl Validator { max_complete_transaction_status_slot, rewards_recorder_sender, rewards_recorder_service, - cache_block_time_sender, - cache_block_time_service, + cache_block_meta_sender, + cache_block_meta_service, evm_block_recorder_sender, evm_block_recorder_service, }, @@ -368,8 +410,12 @@ impl Validator { config.poh_verify, &exit, config.enforce_ulimit_nofile, + &start_progress, + config.no_poh_speed_test, ); + *start_progress.write().unwrap() = ValidatorStartProgress::StartingServices; + let leader_schedule_cache = Arc::new(leader_schedule_cache); let bank = bank_forks.working_bank(); if let Some(ref shrink_paths) = config.account_shrink_paths { @@ -455,33 +501,38 @@ impl Validator { ); let poh_config = Arc::new(genesis_config.poh_config.clone()); - let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal( - bank.tick_height(), - bank.last_blockhash(), - bank.slot(), - leader_schedule_cache.next_leader_slot( - &id, + let (mut poh_recorder, 
entry_receiver, record_receiver) = + PohRecorder::new_with_clear_signal( + bank.tick_height(), + bank.last_blockhash(), bank.slot(), - &bank, - Some(&blockstore), - GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, - ), - bank.ticks_per_slot(), - &id, - &blockstore, - blockstore.new_shreds_signals.first().cloned(), - &leader_schedule_cache, - &poh_config, - ); + leader_schedule_cache.next_leader_slot( + &id, + bank.slot(), + &bank, + Some(&blockstore), + GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, + ), + bank.ticks_per_slot(), + &id, + &blockstore, + blockstore.new_shreds_signals.first().cloned(), + &leader_schedule_cache, + &poh_config, + exit.clone(), + ); if config.snapshot_config.is_some() { poh_recorder.set_bank(&bank); } let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let rpc_override_health_check = Arc::new(AtomicBool::new(false)); - let (rpc_service, bank_notification_sender) = if let Some((rpc_addr, rpc_pubsub_addr)) = - config.rpc_addrs - { + let ( + json_rpc_service, + pubsub_service, + optimistically_confirmed_bank_tracker, + bank_notification_sender, + ) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs { if ContactInfo::is_valid_address(&node.info.rpc) { assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub)); } else { @@ -489,46 +540,48 @@ impl Validator { } let (bank_notification_sender, bank_notification_receiver) = unbounded(); ( - Some(RpcServices { - json_rpc_service: JsonRpcService::new( - rpc_addr, - config.rpc_config.clone(), - config.snapshot_config.clone(), - bank_forks.clone(), - block_commitment_cache.clone(), - blockstore.clone(), - cluster_info.clone(), - Some(poh_recorder.clone()), - genesis_config.hash(), - ledger_path, - validator_exit.clone(), - config.trusted_validators.clone(), - rpc_override_health_check.clone(), - optimistically_confirmed_bank.clone(), - config.send_transaction_retry_ms, - config.send_transaction_leader_forward_count, - max_slots.clone(), - leader_schedule_cache.clone(), - 
max_complete_transaction_status_slot, - ), - pubsub_service: PubSubService::new( + Some(JsonRpcService::new( + rpc_addr, + config.rpc_config.clone(), + config.snapshot_config.clone(), + bank_forks.clone(), + block_commitment_cache.clone(), + blockstore.clone(), + cluster_info.clone(), + Some(poh_recorder.clone()), + genesis_config.hash(), + ledger_path, + config.validator_exit.clone(), + config.trusted_validators.clone(), + rpc_override_health_check.clone(), + optimistically_confirmed_bank.clone(), + config.send_transaction_retry_ms, + config.send_transaction_leader_forward_count, + max_slots.clone(), + leader_schedule_cache.clone(), + max_complete_transaction_status_slot, + )), + if config.rpc_config.minimal_api { + None + } else { + Some(PubSubService::new( config.pubsub_config.clone(), &subscriptions, rpc_pubsub_addr, &exit, - ), - optimistically_confirmed_bank_tracker: OptimisticallyConfirmedBankTracker::new( - bank_notification_receiver, - &exit, - bank_forks.clone(), - optimistically_confirmed_bank, - subscriptions.clone(), - ), - }), + )) + }, + Some(OptimisticallyConfirmedBankTracker::new( + bank_notification_receiver, + &exit, + bank_forks.clone(), + optimistically_confirmed_bank, + subscriptions.clone(), + )), Some(bank_notification_sender), ) } else { - (None, None) + (None, None, None, None) }; if config.dev_halt_at_slot.is_some() { @@ -541,10 +594,11 @@ impl Validator { // Park with the RPC service running, ready for inspection! 
warn!("Validator halted"); + *start_progress.write().unwrap() = ValidatorStartProgress::Halted; std::thread::park(); } - let ip_echo_server = solana_net_utils::ip_echo_server(node.sockets.ip_echo.unwrap()); + let ip_echo_server = node.sockets.ip_echo.map(solana_net_utils::ip_echo_server); let gossip_service = GossipService::new( &cluster_info, @@ -588,13 +642,20 @@ impl Validator { (None, None) }; - if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None); - } - - if wait_for_supermajority(config, &bank, &cluster_info, rpc_override_health_check) { + let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority( + config, + &bank, + &cluster_info, + rpc_override_health_check, + &start_progress, + ) { + waited + } else { abort(); - } + }; + + let wait_for_vote_to_start_leader = + !waited_for_supermajority && !config.no_wait_for_vote_to_start_leader; let poh_service = PohService::new( poh_recorder.clone(), @@ -603,6 +664,7 @@ impl Validator { bank.ticks_per_slot(), config.poh_pinned_cpu_core, config.poh_hashes_per_batch, + record_receiver, ); assert_eq!( blockstore.new_shreds_signals.len(), @@ -616,6 +678,8 @@ impl Validator { let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded(); let (verified_vote_sender, verified_vote_receiver) = unbounded(); + let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); + let (cluster_confirmed_slot_sender, cluster_confirmed_slot_receiver) = unbounded(); let tvu = Tvu::new( vote_account, authorized_voter_keypairs, @@ -658,15 +722,17 @@ impl Validator { config.enable_partition.clone(), transaction_status_sender.clone(), rewards_recorder_sender, - cache_block_time_sender, + cache_block_meta_sender, evm_block_recorder_sender, snapshot_config_and_pending_package, vote_tracker.clone(), retransmit_slots_sender, + gossip_verified_vote_hash_receiver, verified_vote_receiver, replay_vote_sender.clone(), completed_data_sets_sender, bank_notification_sender.clone(), + 
cluster_confirmed_slot_receiver, TvuConfig { max_ledger_shreds: config.max_ledger_shreds, halt_on_trusted_validators_accounts_hash_mismatch: config @@ -680,6 +746,7 @@ impl Validator { use_index_hash_calculation: config.accounts_db_use_index_hash_calculation, rocksdb_compaction_interval: config.rocksdb_compaction_interval, rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval, + wait_for_vote_to_start_leader, }, &max_slots, ); @@ -701,39 +768,41 @@ impl Validator { vote_tracker, bank_forks, verified_vote_sender, + gossip_verified_vote_hash_sender, replay_vote_receiver, replay_vote_sender, bank_notification_sender, config.tpu_coalesce_ms, + cluster_confirmed_slot_sender, ); datapoint_info!("validator-new", ("id", id.to_string(), String)); + *start_progress.write().unwrap() = ValidatorStartProgress::Running; Self { - id, - validator_exit, - rpc_service, + gossip_service, + serve_repair_service, + json_rpc_service, + pubsub_service, + optimistically_confirmed_bank_tracker, transaction_status_service, rewards_recorder_service, - cache_block_time_service, + cache_block_meta_service, sample_performance_service, - evm_block_recorder_service, - gossip_service, - serve_repair_service, - completed_data_sets_service, snapshot_packager_service, - poh_recorder, - poh_service, + completed_data_sets_service, + evm_block_recorder_service, tpu, tvu, + poh_recorder, + poh_service, ip_echo_server, + validator_exit: config.validator_exit.clone(), } } // Used for notifying many nodes in parallel to exit pub fn exit(&mut self) { - if let Some(x) = self.validator_exit.write().unwrap().take() { - x.exit() - } + self.validator_exit.write().unwrap().exit(); } pub fn close(mut self) { @@ -769,18 +838,23 @@ impl Validator { pub fn join(self) { self.poh_service.join().expect("poh_service"); drop(self.poh_recorder); - if let Some(RpcServices { - json_rpc_service, - pubsub_service, - optimistically_confirmed_bank_tracker, - }) = self.rpc_service - { + + if let Some(json_rpc_service) 
= self.json_rpc_service { json_rpc_service.join().expect("rpc_service"); + } + + if let Some(pubsub_service) = self.pubsub_service { pubsub_service.join().expect("pubsub_service"); + } + + if let Some(optimistically_confirmed_bank_tracker) = + self.optimistically_confirmed_bank_tracker + { optimistically_confirmed_bank_tracker .join() .expect("optimistically_confirmed_bank_tracker"); } + if let Some(transaction_status_service) = self.transaction_status_service { transaction_status_service .join() @@ -793,10 +867,10 @@ impl Validator { .expect("rewards_recorder_service"); } - if let Some(cache_block_time_service) = self.cache_block_time_service { - cache_block_time_service + if let Some(cache_block_meta_service) = self.cache_block_meta_service { + cache_block_meta_service .join() - .expect("cache_block_time_service"); + .expect("cache_block_meta_service"); } if let Some(sample_performance_service) = self.sample_performance_service { @@ -824,13 +898,15 @@ impl Validator { self.completed_data_sets_service .join() .expect("completed_data_sets_service"); - self.ip_echo_server.shutdown_background(); + if let Some(ip_echo_server) = self.ip_echo_server { + ip_echo_server.shutdown_background(); + } } } fn active_vote_account_exists_in_bank(bank: &Arc, vote_account: &Pubkey) -> bool { if let Some(account) = &bank.get_account(vote_account) { - if let Some(vote_state) = VoteState::from(&account) { + if let Some(vote_state) = VoteState::from(account) { return !vote_state.votes.is_empty(); } } @@ -871,7 +947,7 @@ fn post_process_restored_tower( validator_identity: &Pubkey, vote_account: &Pubkey, config: &ValidatorConfig, - ledger_path: &Path, + tower_path: &Path, bank_forks: &BankForks, ) -> Tower { let mut should_require_tower = config.require_tower; @@ -950,7 +1026,7 @@ fn post_process_restored_tower( Tower::new_from_bankforks( &bank_forks, - &ledger_path, + tower_path, &validator_identity, &vote_account, ) @@ -966,6 +1042,8 @@ fn new_banks_from_ledger( poh_verify: bool, exit: 
&Arc, enforce_ulimit_nofile: bool, + start_progress: &Arc>, + no_poh_speed_test: bool, ) -> ( GenesisConfig, BankForks, @@ -978,6 +1056,7 @@ fn new_banks_from_ledger( Tower, ) { info!("loading ledger from {:?}...", ledger_path); + *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size); // This needs to be limited otherwise the state in the VoteAccount data @@ -998,6 +1077,10 @@ fn new_banks_from_ledger( } } + if !no_poh_speed_test { + check_poh_speed(&genesis_config, None); + } + let BlockstoreSignals { mut blockstore, ledger_signal_receiver, @@ -1011,7 +1094,9 @@ fn new_banks_from_ledger( .expect("Failed to open ledger database"); blockstore.set_no_compaction(config.no_rocksdb_compaction); - let restored_tower = Tower::restore(ledger_path, &validator_identity); + let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path); + + let restored_tower = Tower::restore(tower_path, &validator_identity); if let Ok(tower) = &restored_tower { reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| { error!("Failed to reconcile blockstore with tower: {:?}", err); @@ -1019,6 +1104,23 @@ fn new_banks_from_ledger( }); } + let blockstore = Arc::new(blockstore); + let blockstore_root_scan = if config.rpc_addrs.is_some() + && config.rpc_config.enable_rpc_transaction_history + && config.rpc_config.rpc_scan_and_fix_roots + { + let blockstore = blockstore.clone(); + let exit = exit.clone(); + Some( + Builder::new() + .name("blockstore-root-scan".to_string()) + .spawn(move || blockstore.scan_and_fix_roots(&exit)) + .unwrap(), + ) + } else { + None + }; + let process_options = blockstore_processor::ProcessOptions { bpf_jit: config.bpf_jit, poh_verify, @@ -1031,7 +1133,6 @@ fn new_banks_from_ledger( ..blockstore_processor::ProcessOptions::default() }; - let blockstore = Arc::new(blockstore); let transaction_history_services = if 
config.rpc_addrs.is_some() && config.rpc_config.enable_rpc_transaction_history { initialize_rpc_transaction_history_services( @@ -1058,7 +1159,10 @@ fn new_banks_from_ledger( process_options, transaction_history_services .transaction_status_sender - .clone(), + .as_ref(), + transaction_history_services + .cache_block_meta_sender + .as_ref(), ) .unwrap_or_else(|err| { error!("Failed to load ledger: {:?}", err); @@ -1115,7 +1219,7 @@ fn new_banks_from_ledger( &validator_identity, &vote_account, &config, - &ledger_path, + tower_path, &bank_forks, ); @@ -1126,6 +1230,12 @@ fn new_banks_from_ledger( bank_forks.set_snapshot_config(config.snapshot_config.clone()); bank_forks.set_accounts_hash_interval_slots(config.accounts_hash_interval_slots); + if let Some(blockstore_root_scan) = blockstore_root_scan { + if let Err(err) = blockstore_root_scan.join() { + warn!("blockstore_root_scan failed to join {:?}", err); + } + } + ( genesis_config, bank_forks, @@ -1237,10 +1347,10 @@ fn initialize_rpc_transaction_history_services( exit, )); - let (cache_block_time_sender, cache_block_time_receiver) = unbounded(); - let cache_block_time_sender = Some(cache_block_time_sender); - let cache_block_time_service = Some(CacheBlockTimeService::new( - cache_block_time_receiver, + let (cache_block_meta_sender, cache_block_meta_receiver) = unbounded(); + let cache_block_meta_sender = Some(cache_block_meta_sender); + let cache_block_meta_service = Some(CacheBlockMetaService::new( + cache_block_meta_receiver, blockstore.clone(), exit, )); @@ -1259,31 +1369,48 @@ fn initialize_rpc_transaction_history_services( max_complete_transaction_status_slot, rewards_recorder_sender, rewards_recorder_service, - cache_block_time_sender, - cache_block_time_service, + cache_block_meta_sender, + cache_block_meta_service, evm_block_recorder_sender, evm_block_recorder_service, } } -// Return true on error, indicating the validator should exit. 
+#[derive(Debug, PartialEq)] +enum ValidatorError { + BadExpectedBankHash, + NotEnoughLedgerData, +} + +// Return if the validator waited on other nodes to start. In this case +// it should not wait for one of it's votes to land to produce blocks +// because if the whole network is waiting, then it will stall. +// +// Error indicates that a bad hash was encountered or another condition +// that is unrecoverable and the validator should exit. fn wait_for_supermajority( config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo, rpc_override_health_check: Arc, -) -> bool { + start_progress: &Arc>, +) -> Result { if let Some(wait_for_supermajority) = config.wait_for_supermajority { match wait_for_supermajority.cmp(&bank.slot()) { - std::cmp::Ordering::Less => return false, + std::cmp::Ordering::Less => return Ok(false), std::cmp::Ordering::Greater => { - error!("Ledger does not have enough data to wait for supermajority, please enable snapshot fetch. Has {} needs {}", bank.slot(), wait_for_supermajority); - return true; + error!( + "Ledger does not have enough data to wait for supermajority, \ + please enable snapshot fetch. Has {} needs {}", + bank.slot(), + wait_for_supermajority + ); + return Err(ValidatorError::NotEnoughLedgerData); } _ => {} } } else { - return false; + return Ok(false); } if let Some(expected_bank_hash) = config.expected_bank_hash { @@ -1293,10 +1420,11 @@ fn wait_for_supermajority( bank.hash(), expected_bank_hash ); - return true; + return Err(ValidatorError::BadExpectedBankHash); } } + *start_progress.write().unwrap() = ValidatorStartProgress::WaitingForSupermajority; for i in 1.. 
{ if i % 10 == 1 { info!( @@ -1318,7 +1446,7 @@ fn wait_for_supermajority( sleep(Duration::new(1, 0)); } rpc_override_health_check.store(false, Ordering::Relaxed); - false + Ok(true) } fn report_target_features() { @@ -1333,22 +1461,17 @@ fn report_target_features() { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { - unsafe { check_avx() }; - } -} - -// Validator binaries built on a machine with AVX support will generate invalid opcodes -// when run on machines without AVX causing a non-obvious process abort. Instead detect -// the mismatch and error cleanly. -#[target_feature(enable = "avx")] -unsafe fn check_avx() { - if is_x86_feature_detected!("avx") { - info!("AVX detected"); - } else { - error!( - "Your machine does not have AVX support, please rebuild from source on your machine" - ); - abort(); + // Validator binaries built on a machine with AVX support will generate invalid opcodes + // when run on machines without AVX causing a non-obvious process abort. Instead detect + // the mismatch and error cleanly. 
+ if is_x86_feature_detected!("avx") { + info!("AVX detected"); + } else { + error!( + "Your machine does not have AVX support, please rebuild from source on your machine" + ); + abort(); + } } } @@ -1483,15 +1606,21 @@ mod tests { rpc_addrs: Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)), ..ValidatorConfig::default() }; + let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default())); let validator = Validator::new( validator_node, &Arc::new(validator_keypair), &validator_ledger_path, &voting_keypair.pubkey(), - vec![voting_keypair.clone()], + Arc::new(RwLock::new(vec![voting_keypair.clone()])), vec![leader_node.info], &config, true, // should_check_duplicate_instance + start_progress.clone(), + ); + assert_eq!( + *start_progress.read().unwrap(), + ValidatorStartProgress::Running ); validator.close(); remove_dir_all(validator_ledger_path).unwrap(); @@ -1521,9 +1650,11 @@ mod tests { } drop(blockstore); + // this purges and compacts all slots greater than or equal to 5 backup_and_clear_blockstore(&blockstore_path, 5, 2); let blockstore = Blockstore::open(&blockstore_path).unwrap(); + // assert that slots less than 5 aren't affected assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty()); for i in 5..10 { assert!(blockstore @@ -1559,10 +1690,11 @@ mod tests { &Arc::new(validator_keypair), &validator_ledger_path, &vote_account_keypair.pubkey(), - vec![Arc::new(vote_account_keypair)], + Arc::new(RwLock::new(vec![Arc::new(vote_account_keypair)])), vec![leader_node.info.clone()], &config, true, // should_check_duplicate_instance + Arc::new(RwLock::new(ValidatorStartProgress::default())), ) }) .collect(); @@ -1594,21 +1726,29 @@ mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let mut config = ValidatorConfig::default(); let rpc_override_health_check = Arc::new(AtomicBool::new(false)); + let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default())); + assert!(!wait_for_supermajority( &config, &bank, 
&cluster_info, - rpc_override_health_check.clone() - )); + rpc_override_health_check.clone(), + &start_progress, + ) + .unwrap()); // bank=0, wait=1, should fail config.wait_for_supermajority = Some(1); - assert!(wait_for_supermajority( - &config, - &bank, - &cluster_info, - rpc_override_health_check.clone() - )); + assert_eq!( + wait_for_supermajority( + &config, + &bank, + &cluster_info, + rpc_override_health_check.clone(), + &start_progress, + ), + Err(ValidatorError::NotEnoughLedgerData) + ); // bank=1, wait=0, should pass, bank is past the wait slot let bank = Bank::new_from_parent(&bank, &Pubkey::default(), 1); @@ -1617,18 +1757,24 @@ mod tests { &config, &bank, &cluster_info, - rpc_override_health_check.clone() - )); + rpc_override_health_check.clone(), + &start_progress, + ) + .unwrap()); // bank=1, wait=1, equal, but bad hash provided config.wait_for_supermajority = Some(1); config.expected_bank_hash = Some(hash(&[1])); - assert!(wait_for_supermajority( - &config, - &bank, - &cluster_info, - rpc_override_health_check - )); + assert_eq!( + wait_for_supermajority( + &config, + &bank, + &cluster_info, + rpc_override_health_check, + &start_progress, + ), + Err(ValidatorError::BadExpectedBankHash) + ); } #[test] diff --git a/core/src/vote_stake_tracker.rs b/core/src/vote_stake_tracker.rs index 0807df2208..feaede7bdc 100644 --- a/core/src/vote_stake_tracker.rs +++ b/core/src/vote_stake_tracker.rs @@ -1,4 +1,3 @@ -use solana_runtime::commitment::VOTE_THRESHOLD_SIZE; use solana_sdk::pubkey::Pubkey; use std::collections::HashSet; @@ -9,29 +8,33 @@ pub struct VoteStakeTracker { } impl VoteStakeTracker { - // Returns tuple (is_confirmed, is_new) where - // `is_confirmed` is true if the stake that has voted has just crosssed the supermajority - // of stake + // Returns tuple (reached_threshold_results, is_new) where + // Each index in `reached_threshold_results` is true if the corresponding threshold in the input + // `thresholds_to_check` was newly reached by adding 
the stake of the input `vote_pubkey` // `is_new` is true if the vote has not been seen before pub fn add_vote_pubkey( &mut self, vote_pubkey: Pubkey, stake: u64, total_stake: u64, - ) -> (bool, bool) { + thresholds_to_check: &[f64], + ) -> (Vec, bool) { let is_new = !self.voted.contains(&vote_pubkey); if is_new { self.voted.insert(vote_pubkey); - let supermajority_stake = (total_stake as f64 * VOTE_THRESHOLD_SIZE) as u64; let old_stake = self.stake; let new_stake = self.stake + stake; self.stake = new_stake; - ( - old_stake <= supermajority_stake && supermajority_stake < new_stake, - is_new, - ) + let reached_threshold_results: Vec = thresholds_to_check + .iter() + .map(|threshold| { + let threshold_stake = (total_stake as f64 * threshold) as u64; + old_stake <= threshold_stake && threshold_stake < new_stake + }) + .collect(); + (reached_threshold_results, is_new) } else { - (false, is_new) + (vec![false; thresholds_to_check.len()], is_new) } } @@ -47,6 +50,7 @@ impl VoteStakeTracker { #[cfg(test)] mod test { use super::*; + use solana_runtime::commitment::VOTE_THRESHOLD_SIZE; #[test] fn test_add_vote_pubkey() { @@ -54,24 +58,43 @@ mod test { let mut vote_stake_tracker = VoteStakeTracker::default(); for i in 0..10 { let pubkey = solana_sdk::pubkey::new_rand(); - let (is_confirmed, is_new) = - vote_stake_tracker.add_vote_pubkey(pubkey, 1, total_epoch_stake); + let (is_confirmed_thresholds, is_new) = vote_stake_tracker.add_vote_pubkey( + pubkey, + 1, + total_epoch_stake, + &[VOTE_THRESHOLD_SIZE, 0.0], + ); let stake = vote_stake_tracker.stake(); - let (is_confirmed2, is_new2) = - vote_stake_tracker.add_vote_pubkey(pubkey, 1, total_epoch_stake); + let (is_confirmed_thresholds2, is_new2) = vote_stake_tracker.add_vote_pubkey( + pubkey, + 1, + total_epoch_stake, + &[VOTE_THRESHOLD_SIZE, 0.0], + ); let stake2 = vote_stake_tracker.stake(); // Stake should not change from adding same pubkey twice assert_eq!(stake, stake2); - assert!(!is_confirmed2); + 
assert!(!is_confirmed_thresholds2[0]); + assert!(!is_confirmed_thresholds2[1]); assert!(!is_new2); + assert_eq!(is_confirmed_thresholds.len(), 2); + assert_eq!(is_confirmed_thresholds2.len(), 2); // at i == 6, the voted stake is 70%, which is the first time crossing // the supermajority threshold if i == 6 { - assert!(is_confirmed); + assert!(is_confirmed_thresholds[0]); } else { - assert!(!is_confirmed); + assert!(!is_confirmed_thresholds[0]); + } + + // at i == 6, the voted stake is 10%, which is the first time crossing + // the 0% threshold + if i == 0 { + assert!(is_confirmed_thresholds[1]); + } else { + assert!(!is_confirmed_thresholds[1]); } assert!(is_new); } diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 614c2f26c6..5d68b6d46e 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -26,7 +26,7 @@ use solana_metrics::{inc_new_counter_debug, inc_new_counter_error}; use solana_perf::packet::Packets; use solana_rayon_threadlimit::get_thread_count; use solana_runtime::{bank::Bank, bank_forks::BankForks}; -use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms}; +use solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms}; use solana_streamer::streamer::PacketSender; use std::{ net::{SocketAddr, UdpSocket}, @@ -36,6 +36,9 @@ use std::{ time::{Duration, Instant}, }; +pub type DuplicateSlotSender = CrossbeamSender; +pub type DuplicateSlotReceiver = CrossbeamReceiver; + fn verify_shred_slot(shred: &Shred, root: u64) -> bool { if shred.is_data() { // Only data shreds have parent information @@ -86,21 +89,25 @@ fn run_check_duplicate( cluster_info: &ClusterInfo, blockstore: &Blockstore, shred_receiver: &CrossbeamReceiver, + duplicate_slot_sender: &DuplicateSlotSender, ) -> Result<()> { let check_duplicate = |shred: Shred| -> Result<()> { - if !blockstore.has_duplicate_shreds_in_slot(shred.slot()) { + let shred_slot = shred.slot(); + if 
!blockstore.has_duplicate_shreds_in_slot(shred_slot) { if let Some(existing_shred_payload) = blockstore.is_shred_duplicate( - shred.slot(), + shred_slot, shred.index(), &shred.payload, shred.is_data(), ) { cluster_info.push_duplicate_shred(&shred, &existing_shred_payload)?; blockstore.store_duplicate_slot( - shred.slot(), + shred_slot, existing_shred_payload, shred.payload, )?; + + duplicate_slot_sender.send(shred_slot)?; } } @@ -319,6 +326,7 @@ impl WindowService { cluster_slots: Arc, verified_vote_receiver: VerifiedVoteReceiver, completed_data_sets_sender: CompletedDataSetsSender, + duplicate_slots_sender: DuplicateSlotSender, ) -> WindowService where F: 'static @@ -346,6 +354,7 @@ impl WindowService { exit.clone(), blockstore.clone(), duplicate_receiver, + duplicate_slots_sender, ); let t_insert = Self::start_window_insert_thread( @@ -381,6 +390,7 @@ impl WindowService { exit: Arc, blockstore: Arc, duplicate_receiver: CrossbeamReceiver, + duplicate_slot_sender: DuplicateSlotSender, ) -> JoinHandle<()> { let handle_error = || { inc_new_counter_error!("solana-check-duplicate-error", 1, 1); @@ -393,8 +403,12 @@ impl WindowService { } let mut noop = || {}; - if let Err(e) = run_check_duplicate(&cluster_info, &blockstore, &duplicate_receiver) - { + if let Err(e) = run_check_duplicate( + &cluster_info, + &blockstore, + &duplicate_receiver, + &duplicate_slot_sender, + ) { if Self::should_exit_on_error(e, &mut noop, &handle_error) { break; } @@ -408,7 +422,7 @@ impl WindowService { blockstore: &Arc, leader_schedule_cache: &Arc, insert_receiver: CrossbeamReceiver<(Vec, Vec>)>, - duplicate_sender: CrossbeamSender, + check_duplicate_sender: CrossbeamSender, completed_data_sets_sender: CompletedDataSetsSender, ) -> JoinHandle<()> { let exit = exit.clone(); @@ -423,7 +437,7 @@ impl WindowService { .name("solana-window-insert".to_string()) .spawn(move || { let handle_duplicate = |shred| { - let _ = duplicate_sender.send(shred); + let _ = check_duplicate_sender.send(shred); }; 
let mut metrics = BlockstoreInsertionMetrics::default(); let mut last_print = Instant::now(); @@ -538,6 +552,7 @@ impl WindowService { handle_timeout(); false } + Error::CrossbeamSendError => true, _ => { handle_error(); error!("thread {:?} error {:?}", thread::current().name(), e); @@ -566,7 +581,6 @@ mod test { shred::{DataShredHeader, Shredder}, }; use solana_sdk::{ - clock::Slot, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, hash::Hash, signature::{Keypair, Signer}, @@ -580,8 +594,7 @@ mod test { parent: Slot, keypair: &Arc, ) -> Vec { - let shredder = Shredder::new(slot, parent, 0.0, keypair.clone(), 0, 0) - .expect("Failed to create entry shredder"); + let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap(); shredder.entries_to_shreds(&entries, true, 0).0 } @@ -680,6 +693,7 @@ mod test { let blockstore_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); let (sender, receiver) = unbounded(); + let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded(); let (shreds, _) = make_many_slot_entries(5, 5, 10); blockstore .insert_shreds(shreds.clone(), None, false) @@ -692,7 +706,17 @@ mod test { let keypair = Keypair::new(); let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp()); let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair)); - run_check_duplicate(&cluster_info, &blockstore, &receiver).unwrap(); + run_check_duplicate( + &cluster_info, + &blockstore, + &receiver, + &duplicate_slot_sender, + ) + .unwrap(); assert!(blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot)); + assert_eq!( + duplicate_slot_receiver.try_recv().unwrap(), + duplicate_shred_slot + ); } } diff --git a/core/tests/cluster_info.rs b/core/tests/cluster_info.rs index 97684b3e19..10df1bd8f3 100644 --- a/core/tests/cluster_info.rs +++ b/core/tests/cluster_info.rs @@ -1,7 +1,7 @@ #![allow(clippy::integer_arithmetic)] use rayon::iter::ParallelIterator; use rayon::prelude::*; -use 
serial_test_derive::serial; +use serial_test::serial; use solana_core::cluster_info::{compute_retransmit_peers, ClusterInfo}; use solana_core::contact_info::ContactInfo; use solana_sdk::pubkey::Pubkey; diff --git a/core/tests/crds_gossip.rs b/core/tests/crds_gossip.rs index 938eb7f1d3..04339db235 100644 --- a/core/tests/crds_gossip.rs +++ b/core/tests/crds_gossip.rs @@ -3,36 +3,66 @@ use bincode::serialized_size; use log::*; use rayon::prelude::*; use rayon::{ThreadPool, ThreadPoolBuilder}; -use serial_test_derive::serial; -use solana_core::cluster_info; -use solana_core::contact_info::ContactInfo; -use solana_core::crds_gossip::*; -use solana_core::crds_gossip_error::CrdsGossipError; -use solana_core::crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}; -use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS; -use solana_core::crds_value::CrdsValueLabel; -use solana_core::crds_value::{CrdsData, CrdsValue}; +use serial_test::serial; +use solana_core::{ + cluster_info, + contact_info::ContactInfo, + crds_gossip::*, + crds_gossip_error::CrdsGossipError, + crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS}, + crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS, + crds_value::{CrdsData, CrdsValue, CrdsValueLabel}, + ping_pong::PingCache, +}; use solana_rayon_threadlimit::get_thread_count; -use solana_sdk::hash::hash; -use solana_sdk::pubkey::Pubkey; -use solana_sdk::timing::timestamp; -use std::collections::{HashMap, HashSet}; -use std::ops::Deref; -use std::sync::{Arc, Mutex}; +use solana_sdk::{ + hash::hash, + pubkey::Pubkey, + signature::{Keypair, Signer}, + timing::timestamp, +}; +use std::{ + collections::{HashMap, HashSet}, + ops::Deref, + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; #[derive(Clone)] struct Node { + keypair: Arc, + contact_info: ContactInfo, gossip: Arc>, + ping_cache: Arc>, stake: u64, } impl Node { - fn new(gossip: Arc>) -> Self { - Node { gossip, stake: 0 } + fn new( + keypair: Arc, + 
contact_info: ContactInfo, + gossip: Arc>, + ) -> Self { + Self::staked(keypair, contact_info, gossip, 0) } - fn staked(gossip: Arc>, stake: u64) -> Self { - Node { gossip, stake } + fn staked( + keypair: Arc, + contact_info: ContactInfo, + gossip: Arc>, + stake: u64, + ) -> Self { + let ping_cache = Arc::new(Mutex::new(PingCache::new( + Duration::from_secs(20 * 60), // ttl + 2048, // capacity + ))); + Node { + keypair, + contact_info, + gossip, + ping_cache, + stake, + } } } @@ -77,71 +107,72 @@ fn stakes(network: &Network) -> HashMap { } fn star_network_create(num: usize) -> Network { - let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let mut network: HashMap<_, _> = (1..num) .map(|_| { - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let id = new.label().pubkey(); let mut node = CrdsGossip::default(); node.crds.insert(new.clone(), timestamp()).unwrap(); node.crds.insert(entry.clone(), timestamp()).unwrap(); node.set_self(&id); - (new.label().pubkey(), Node::new(Arc::new(Mutex::new(node)))) + let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node))); + (new.label().pubkey(), node) }) .collect(); let mut node = CrdsGossip::default(); let id = entry.label().pubkey(); node.crds.insert(entry, timestamp()).unwrap(); node.set_self(&id); - network.insert(id, Node::new(Arc::new(Mutex::new(node)))); + let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node))); + 
network.insert(id, node); Network::new(network) } fn rstar_network_create(num: usize) -> Network { - let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let mut origin = CrdsGossip::default(); let id = entry.label().pubkey(); origin.crds.insert(entry, timestamp()).unwrap(); origin.set_self(&id); let mut network: HashMap<_, _> = (1..num) .map(|_| { - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let id = new.label().pubkey(); let mut node = CrdsGossip::default(); node.crds.insert(new.clone(), timestamp()).unwrap(); origin.crds.insert(new.clone(), timestamp()).unwrap(); node.set_self(&id); - (new.label().pubkey(), Node::new(Arc::new(Mutex::new(node)))) + + let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node))); + (new.label().pubkey(), node) }) .collect(); - network.insert(id, Node::new(Arc::new(Mutex::new(origin)))); + let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(origin))); + network.insert(id, node); Network::new(network) } fn ring_network_create(num: usize) -> Network { let mut network: HashMap<_, _> = (0..num) .map(|_| { - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let new = 
CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let id = new.label().pubkey(); let mut node = CrdsGossip::default(); node.crds.insert(new.clone(), timestamp()).unwrap(); node.set_self(&id); - (new.label().pubkey(), Node::new(Arc::new(Mutex::new(node)))) + let node = Node::new(node_keypair, contact_info, Arc::new(Mutex::new(node))); + (new.label().pubkey(), node) }) .collect(); let keys: Vec = network.keys().cloned().collect(); @@ -149,13 +180,9 @@ fn ring_network_create(num: usize) -> Network { let start_info = { let start = &network[&keys[k]]; let start_id = start.lock().unwrap().id; - start - .lock() - .unwrap() - .crds - .lookup(&CrdsValueLabel::ContactInfo(start_id)) - .unwrap() - .clone() + let label = CrdsValueLabel::ContactInfo(start_id); + let gossip = start.gossip.lock().unwrap(); + gossip.crds.get(&label).unwrap().value.clone() }; let end = network.get_mut(&keys[(k + 1) % keys.len()]).unwrap(); end.lock() @@ -171,18 +198,20 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network { let num = stakes.len(); let mut network: HashMap<_, _> = (0..num) .map(|n| { - let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( - &solana_sdk::pubkey::new_rand(), - 0, - ))); + let node_keypair = Arc::new(Keypair::new()); + let contact_info = ContactInfo::new_localhost(&node_keypair.pubkey(), 0); + let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info.clone())); let id = new.label().pubkey(); let mut node = CrdsGossip::default(); node.crds.insert(new.clone(), timestamp()).unwrap(); node.set_self(&id); - ( - new.label().pubkey(), - Node::staked(Arc::new(Mutex::new(node)), stakes[n]), - ) + let node = Node::staked( + node_keypair, + contact_info, + Arc::new(Mutex::new(node)), + stakes[n], + ); + (new.label().pubkey(), node) }) .collect(); @@ -193,7 +222,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network { let start = &network[k].lock().unwrap(); let start_id = start.id; let start_label 
= CrdsValueLabel::ContactInfo(start_id); - start.crds.lookup(&start_label).unwrap().clone() + start.crds.get(&start_label).unwrap().value.clone() }) .collect(); for end in network.values_mut() { @@ -242,11 +271,9 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver // push a message to the network network_values.par_iter().for_each(|locked_node| { let node = &mut locked_node.lock().unwrap(); - let mut m = node - .crds - .lookup(&CrdsValueLabel::ContactInfo(node.id)) - .and_then(|v| v.contact_info().cloned()) - .unwrap(); + let label = CrdsValueLabel::ContactInfo(node.id); + let entry = node.crds.get(&label).unwrap(); + let mut m = entry.value.contact_info().cloned().unwrap(); m.wallclock = now; node.process_push_message( &Pubkey::default(), @@ -300,9 +327,12 @@ fn network_run_push( .par_iter() .map(|node| { let mut node_lock = node.lock().unwrap(); - let timeouts = node_lock.make_timeouts_test(); + let timeouts = node_lock.make_timeouts( + &HashMap::default(), // stakes + Duration::from_millis(node_lock.pull.crds_timeout), + ); node_lock.purge(thread_pool, now, &timeouts); - node_lock.new_push_messages(vec![], now) + (node_lock.id, node_lock.new_push_messages(vec![], now)) }) .collect(); let transfered: Vec<_> = requests @@ -315,24 +345,17 @@ fn network_run_push( for (to, msgs) in push_messages { bytes += serialized_size(&msgs).unwrap() as usize; num_msgs += 1; - let updated = network + let origins: HashSet<_> = network .get(&to) - .map(|node| { - node.lock() - .unwrap() - .process_push_message(&from, msgs.clone(), now) - }) - .unwrap(); - - let updated_labels: Vec<_> = - updated.into_iter().map(|u| u.value.label()).collect(); + .unwrap() + .lock() + .unwrap() + .process_push_message(&from, msgs.clone(), now) + .into_iter() + .collect(); let prunes_map = network .get(&to) - .map(|node| { - node.lock() - .unwrap() - .prune_received_cache(updated_labels, &stakes) - }) + .map(|node| node.lock().unwrap().prune_received_cache(origins, 
&stakes)) .unwrap(); for (from, prune_set) in prunes_map { @@ -382,7 +405,10 @@ fn network_run_push( } total = network_values .par_iter() - .map(|v| v.lock().unwrap().push.num_pending()) + .map(|node| { + let gossip = node.gossip.lock().unwrap(); + gossip.push.num_pending(&gossip.crds) + }) .sum(); trace!( "network_run_push_{}: now: {} queue: {} bytes: {} num_msgs: {} prunes: {} stake_pruned: {} delivered: {}", @@ -416,23 +442,43 @@ fn network_run_pull( let network_values: Vec = network.values().cloned().collect(); let mut timeouts = HashMap::new(); timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS); - + for node in &network_values { + let mut ping_cache = node.ping_cache.lock().unwrap(); + for other in &network_values { + if node.keypair.pubkey() != other.keypair.pubkey() { + ping_cache.mock_pong( + other.keypair.pubkey(), + other.contact_info.gossip, + Instant::now(), + ); + } + } + } for t in start..end { let now = t as u64 * 100; let requests: Vec<_> = { network_values .par_iter() .filter_map(|from| { - from.lock() + let mut pings = Vec::new(); + let (peer, filters) = from + .lock() .unwrap() .new_pull_request( &thread_pool, + from.keypair.deref(), now, None, &HashMap::new(), cluster_info::MAX_BLOOM_SIZE, + from.ping_cache.deref(), + &mut pings, ) - .ok() + .ok()?; + let gossip = from.gossip.lock().unwrap(); + let label = CrdsValueLabel::ContactInfo(gossip.id); + let self_info = gossip.crds.get(&label).unwrap().value.clone(); + Some((peer.id, filters, self_info)) }) .collect() }; @@ -478,7 +524,7 @@ fn network_run_pull( msgs += rsp.len(); if let Some(node) = network.get(&from) { let mut node = node.lock().unwrap(); - node.mark_pull_request_creation_time(&from, now); + node.mark_pull_request_creation_time(from, now); let mut stats = ProcessPullStats::default(); let (vers, vers_expired_timeout, failed_inserts) = node.filter_pull_responses(&timeouts, rsp, now, &mut stats); diff --git a/core/tests/gossip.rs b/core/tests/gossip.rs index 
b272b908bd..b66b034cb8 100644 --- a/core/tests/gossip.rs +++ b/core/tests/gossip.rs @@ -3,8 +3,11 @@ extern crate log; use rayon::iter::*; -use solana_core::cluster_info::{ClusterInfo, Node}; -use solana_core::gossip_service::GossipService; +use solana_core::{ + cluster_info::{ClusterInfo, Node}, + crds::Cursor, + gossip_service::GossipService, +}; use solana_runtime::bank_forks::BankForks; use solana_perf::packet::Packet; @@ -201,7 +204,7 @@ pub fn cluster_info_retransmit() { p.meta.size = 10; let peers = c1.tvu_peers(); let retransmit_peers: Vec<_> = peers.iter().collect(); - ClusterInfo::retransmit_to(&retransmit_peers, &mut p, &tn1, false).unwrap(); + ClusterInfo::retransmit_to(&retransmit_peers, &p, &tn1, false).unwrap(); let res: Vec<_> = [tn1, tn2, tn3] .into_par_iter() .map(|s| { @@ -305,12 +308,11 @@ pub fn cluster_info_scale() { let mut num_push_total = 0; let mut num_pushes = 0; let mut num_pulls = 0; - let mut num_inserts = 0; for node in nodes.iter() { //if node.0.get_votes(0).1.len() != (num_nodes * num_votes) { let has_tx = node .0 - .get_votes(0) + .get_votes(&mut Cursor::default()) .1 .iter() .filter(|v| v.message.account_keys == tx.message.account_keys) @@ -319,7 +321,6 @@ pub fn cluster_info_scale() { num_push_total += node.0.gossip.read().unwrap().push.num_total; num_pushes += node.0.gossip.read().unwrap().push.num_pushes; num_pulls += node.0.gossip.read().unwrap().pull.num_pulls; - num_inserts += node.0.gossip.read().unwrap().crds.num_inserts; if has_tx == 0 { not_done += 1; } @@ -329,7 +330,6 @@ pub fn cluster_info_scale() { warn!("num_push_total: {}", num_push_total); warn!("num_pushes: {}", num_pushes); warn!("num_pulls: {}", num_pulls); - warn!("num_inserts: {}", num_inserts); success = not_done < (nodes.len() / 20); if success { break; @@ -347,7 +347,6 @@ pub fn cluster_info_scale() { node.0.gossip.write().unwrap().push.num_total = 0; node.0.gossip.write().unwrap().push.num_pushes = 0; node.0.gossip.write().unwrap().pull.num_pulls = 0; - 
node.0.gossip.write().unwrap().crds.num_inserts = 0; } } diff --git a/core/tests/ledger_cleanup.rs b/core/tests/ledger_cleanup.rs index d741889b3a..69778edefa 100644 --- a/core/tests/ledger_cleanup.rs +++ b/core/tests/ledger_cleanup.rs @@ -3,15 +3,17 @@ #[cfg(test)] mod tests { + use log::*; use solana_core::ledger_cleanup_service::LedgerCleanupService; use solana_ledger::blockstore::{make_many_slot_entries, Blockstore}; use solana_ledger::get_tmp_ledger_path; use solana_ledger::shred::Shred; + use solana_measure::measure::Measure; use std::collections::VecDeque; use std::str::FromStr; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::mpsc::channel; - use std::sync::{Arc, RwLock}; + use std::sync::{Arc, Mutex, RwLock}; use std::thread::{self, Builder, JoinHandle}; use std::time::{Duration, Instant}; use systemstat::{CPULoad, Platform, System}; @@ -37,6 +39,8 @@ mod tests { pub cleanup_blockstore: bool, pub emit_cpu_info: bool, pub assert_compaction: bool, + pub compaction_interval: Option, + pub no_compaction: bool, } #[derive(Clone, Copy, Debug)] @@ -152,6 +156,11 @@ mod tests { let emit_cpu_info = read_env("EMIT_CPU_INFO", true); // set default to `true` once compaction is merged let assert_compaction = read_env("ASSERT_COMPACTION", false); + let compaction_interval = match read_env("COMPACTION_INTERVAL", 0) { + maybe_zero if maybe_zero == 0 => None, + non_zero => Some(non_zero), + }; + let no_compaction = read_env("NO_COMPACTION", false); BenchmarkConfig { benchmark_slots, @@ -164,6 +173,8 @@ mod tests { cleanup_blockstore, emit_cpu_info, assert_compaction, + compaction_interval, + no_compaction, } } @@ -187,7 +198,7 @@ mod tests { let (cpu_user, cpu_system, cpu_idle) = (cpu.cpu_user, cpu.cpu_system, cpu.cpu_idle); println!( - "{},{},{},{},{},{},{},{},{},{},{}", + "{},{},{},{},{},{},{},{},{:.2},{:.2},{:.2}", time_now.duration_since(time_initial).as_millis(), time_now.duration_since(*time_previous).as_millis(), start_slot, @@ -207,9 +218,15 
@@ mod tests { #[test] fn test_ledger_cleanup_compaction() { + solana_logger::setup(); let blockstore_path = get_tmp_ledger_path!(); - let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap()); + let mut blockstore = Blockstore::open(&blockstore_path).unwrap(); let config = get_benchmark_config(); + if config.no_compaction { + blockstore.set_no_compaction(true); + } + let blockstore = Arc::new(blockstore); + eprintln!("BENCHMARK CONFIG: {:?}", config); eprintln!("LEDGER_PATH: {:?}", &blockstore_path); @@ -220,6 +237,8 @@ mod tests { let stop_size_bytes = config.stop_size_bytes; let stop_size_iterations = config.stop_size_iterations; let pre_generate_data = config.pre_generate_data; + let compaction_interval = config.compaction_interval; + let batches = benchmark_slots / batch_size; let (sender, receiver) = channel(); @@ -229,7 +248,7 @@ mod tests { blockstore.clone(), max_ledger_shreds, &exit, - None, + compaction_interval, None, ); @@ -242,8 +261,8 @@ mod tests { let t0 = Instant::now(); eprintln!("PRE_GENERATE_DATA: (this may take a while)"); for i in 0..batches { - let x = i * batch_size; - let (shreds, _) = make_many_slot_entries(x, batch_size, entries_per_slot); + let start_slot = i * batch_size; + let (shreds, _) = make_many_slot_entries(start_slot, batch_size, entries_per_slot); generated_batches.push_back(shreds); } eprintln!("PRE_GENERATE_DATA: took {} ms", t0.elapsed().as_millis()); @@ -267,23 +286,118 @@ mod tests { &sys.get_stats(), ); + let mut total_make = 0; + let mut num_slots = 0; + let mut total_slots = 0; + let mut time = Instant::now(); + let mut start = Measure::start("start"); + let shreds: Arc>>> = Arc::new(Mutex::new(VecDeque::new())); + let shreds1 = shreds.clone(); + let insert_exit = Arc::new(AtomicBool::new(false)); + let insert_exit1 = insert_exit.clone(); + let blockstore1 = blockstore.clone(); + let insert_thread = Builder::new() + .name("insert_shreds".to_string()) + .spawn(move || { + let start = Instant::now(); + let 
mut now = Instant::now(); + let mut total = 0; + let mut total_batches = 0; + let mut total_inserted_shreds = 0; + let mut num_shreds = 0; + let mut max_speed = 0f32; + let mut min_speed = f32::MAX; + loop { + let (new_shreds, len) = { + let mut sl = shreds1.lock().unwrap(); + (sl.pop_front(), sl.len()) + }; + if now.elapsed().as_secs() > 0 { + let shreds_per_second = num_shreds as f32 / now.elapsed().as_secs() as f32; + warn!( + "tried: {} inserted: {} batches: {} len: {} shreds_per_second: {}", + total, total_inserted_shreds, total_batches, len, shreds_per_second, + ); + let average_speed = + total_inserted_shreds as f32 / start.elapsed().as_secs() as f32; + max_speed = max_speed.max(shreds_per_second); + min_speed = min_speed.min(shreds_per_second); + warn!( + "highest: {} lowest: {} avg: {}", + max_speed, min_speed, average_speed + ); + now = Instant::now(); + num_shreds = 0; + } + if let Some(new_shreds) = new_shreds { + total += new_shreds.len(); + total_batches += 1; + let br = blockstore1.insert_shreds(new_shreds, None, false).unwrap(); + total_inserted_shreds += br.1.len(); + num_shreds += br.1.len(); + } else { + thread::sleep(Duration::from_millis(200)); + } + if insert_exit1.load(Ordering::Relaxed) { + info!( + "insert exiting... 
highest shreds/s: {} lowest shreds/s: {}", + max_speed, min_speed + ); + break; + } + } + }) + .unwrap(); + let mut entries_batch = make_many_slot_entries(0, batch_size, entries_per_slot).0; + info!( + "batch size: {} entries_per_slot: {} shreds_per_slot: {}", + batch_size, + entries_per_slot, + entries_batch.len() + ); + shreds.lock().unwrap().push_back(entries_batch.clone()); for i in 0..batches { - let x = i * batch_size; + let start_slot = i * batch_size; + + if time.elapsed().as_secs() > 0 { + warn!( + "total slots: {} slots: {} make: {}ms {:.2}", + total_slots, + num_slots, + total_make / (1000), + num_slots as f32 / time.elapsed().as_secs() as f32, + ); + num_slots = 0; + total_make = 0; + time = Instant::now(); + } - let shreds = if pre_generate_data { - generated_batches.pop_front().unwrap() + if shreds.lock().unwrap().len() < 50 { + let mut make_time = Measure::start("make_entries"); + let new_shreds = if pre_generate_data { + generated_batches.pop_front().unwrap() + } else { + num_slots += batch_size; + total_slots += batch_size; + entries_batch + .iter_mut() + .for_each(|shred| shred.set_slot(shred.slot() + batch_size)); + entries_batch.clone() + }; + shreds.lock().unwrap().push_back(new_shreds); + make_time.stop(); + total_make += make_time.as_us(); } else { - make_many_slot_entries(x, batch_size, entries_per_slot).0 - }; + thread::sleep(Duration::from_millis(200)); + } - blockstore.insert_shreds(shreds, None, false).unwrap(); - sender.send(x).unwrap(); + sender.send(start_slot).unwrap(); emit_stats( time_initial, &mut time_previous, &mut storage_previous, - x, + start_slot, batch_size, batch_size, max_ledger_shreds as i64, @@ -303,7 +417,30 @@ mod tests { } } } + start.stop(); + let mut now = Instant::now(); + loop { + if now.elapsed().as_secs() > 1 { + warn!( + "waiting for insert queue to clear.. 
{}", + shreds.lock().unwrap().len() + ); + now = Instant::now(); + } + if shreds.lock().unwrap().is_empty() { + break; + } else { + thread::sleep(Duration::from_millis(200)); + } + } + insert_exit.store(true, Ordering::Relaxed); + insert_thread.join().unwrap(); + info!( + "done {} {} shreds/s", + start, + (batches * batch_size) as f32 / start.as_s() + ); let u1 = storage_previous; // send final `ledger_cleanup` notification (since iterations above are zero-based) @@ -330,6 +467,7 @@ mod tests { std::thread::sleep(Duration::from_millis(200)); } + info!("done polling"); emit_stats( time_initial, &mut time_previous, diff --git a/core/tests/rpc.rs b/core/tests/rpc.rs index d864c81da3..57daf863f2 100644 --- a/core/tests/rpc.rs +++ b/core/tests/rpc.rs @@ -1,8 +1,5 @@ use bincode::serialize; -use jsonrpc_core::futures::{ - future::{self, Future}, - stream::Stream, -}; +use jsonrpc_core::futures::StreamExt; use jsonrpc_core_client::transports::ws; use log::*; use reqwest::{self, header::CONTENT_TYPE}; @@ -12,6 +9,7 @@ use solana_client::{ rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcSignatureSubscribeConfig}, rpc_response::{Response, RpcSignatureResult, SlotUpdate}, + tpu_client::{TpuClient, TpuClientConfig}, }; use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, test_validator::TestValidator}; use solana_sdk::{ @@ -29,7 +27,7 @@ use std::{ thread::sleep, time::{Duration, Instant}, }; -use tokio_01::runtime::Runtime; +use tokio_02::runtime::Runtime; macro_rules! 
json_req { ($method: expr, $params: expr) => {{ @@ -152,31 +150,22 @@ fn test_rpc_slot_updates() { let test_validator = TestValidator::with_no_fees(Pubkey::new_unique(), None); // Create the pub sub runtime - let mut rt = Runtime::new().unwrap(); + let rt = Runtime::new().unwrap(); let rpc_pubsub_url = test_validator.rpc_pubsub_url(); let (update_sender, update_receiver) = channel::>(); // Subscribe to slot updates - rt.spawn({ + rt.spawn(async move { let connect = ws::try_connect::(&rpc_pubsub_url).unwrap(); - connect - .and_then(move |client| { - tokio_01::spawn( - client - .slots_updates_subscribe() - .and_then(move |update_stream| { - update_stream.for_each(move |update| { - update_sender.send(update).unwrap(); - future::ok(()) - }) - }) - .map_err(|err| { - eprintln!("slot update sub err: {:#?}", err); - }), - ); - future::ok(()) - }) - .map_err(|_| ()) + let client = connect.await.unwrap(); + + tokio_02::spawn(async move { + let mut update_sub = client.slots_updates_subscribe().unwrap(); + loop { + let response = update_sub.next().await.unwrap(); + update_sender.send(response.unwrap()).unwrap(); + } + }); }); let first_update = update_receiver @@ -216,8 +205,6 @@ fn test_rpc_slot_updates() { } } } - - rt.shutdown_now().wait().unwrap(); } #[test] @@ -261,74 +248,60 @@ fn test_rpc_subscriptions() { let (status_sender, status_receiver) = channel::<(String, Response)>(); // Create the pub sub runtime - let mut rt = Runtime::new().unwrap(); - - // Subscribe to all signatures - rt.spawn({ - let connect = ws::try_connect::(&test_validator.rpc_pubsub_url()).unwrap(); - let signature_set = signature_set.clone(); - connect - .and_then(move |client| { - for sig in signature_set { - let status_sender = status_sender.clone(); - tokio_01::spawn( - client - .signature_subscribe( - sig.clone(), - Some(RpcSignatureSubscribeConfig { - commitment: Some(CommitmentConfig::confirmed()), - ..RpcSignatureSubscribeConfig::default() - }), - ) - .and_then(move |sig_stream| { - 
sig_stream.for_each(move |result| { - status_sender.send((sig.clone(), result)).unwrap(); - future::ok(()) - }) - }) - .map_err(|err| { - eprintln!("sig sub err: {:#?}", err); - }), - ); - } - tokio_01::spawn( - client - .slot_subscribe() - .and_then(move |slot_stream| { - slot_stream.for_each(move |_| { - ready_sender.send(()).unwrap(); - future::ok(()) - }) - }) - .map_err(|err| { - eprintln!("slot sub err: {:#?}", err); - }), - ); - for pubkey in account_set { - let account_sender = account_sender.clone(); - tokio_01::spawn( - client - .account_subscribe( - pubkey, - Some(RpcAccountInfoConfig { - commitment: Some(CommitmentConfig::confirmed()), - ..RpcAccountInfoConfig::default() - }), - ) - .and_then(move |account_stream| { - account_stream.for_each(move |result| { - account_sender.send(result).unwrap(); - future::ok(()) - }) - }) - .map_err(|err| { - eprintln!("acct sub err: {:#?}", err); - }), - ); - } - future::ok(()) - }) - .map_err(|_| ()) + let rt = Runtime::new().unwrap(); + let rpc_pubsub_url = test_validator.rpc_pubsub_url(); + let signature_set_clone = signature_set.clone(); + rt.spawn(async move { + let connect = ws::try_connect::(&rpc_pubsub_url).unwrap(); + let client = connect.await.unwrap(); + + // Subscribe to signature notifications + for sig in signature_set_clone { + let status_sender = status_sender.clone(); + let mut sig_sub = client + .signature_subscribe( + sig.clone(), + Some(RpcSignatureSubscribeConfig { + commitment: Some(CommitmentConfig::confirmed()), + ..RpcSignatureSubscribeConfig::default() + }), + ) + .unwrap_or_else(|err| panic!("sig sub err: {:#?}", err)); + + tokio_02::spawn(async move { + let response = sig_sub.next().await.unwrap(); + status_sender + .send((sig.clone(), response.unwrap())) + .unwrap(); + }); + } + + // Subscribe to account notifications + for pubkey in account_set { + let account_sender = account_sender.clone(); + let mut client_sub = client + .account_subscribe( + pubkey, + Some(RpcAccountInfoConfig { + 
commitment: Some(CommitmentConfig::confirmed()), + ..RpcAccountInfoConfig::default() + }), + ) + .unwrap_or_else(|err| panic!("acct sub err: {:#?}", err)); + tokio_02::spawn(async move { + let response = client_sub.next().await.unwrap(); + account_sender.send(response.unwrap()).unwrap(); + }); + } + + // Signal ready after the next slot notification + let mut slot_sub = client + .slot_subscribe() + .unwrap_or_else(|err| panic!("sig sub err: {:#?}", err)); + tokio_02::spawn(async move { + let _response = slot_sub.next().await.unwrap(); + ready_sender.send(()).unwrap(); + }); }); // Wait for signature subscriptions @@ -400,6 +373,38 @@ fn test_rpc_subscriptions() { } } } +} - rt.shutdown_now().wait().unwrap(); +#[test] +fn test_tpu_send_transaction() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let test_validator = TestValidator::with_no_fees(mint_pubkey, None); + let rpc_client = Arc::new(RpcClient::new_with_commitment( + test_validator.rpc_url(), + CommitmentConfig::processed(), + )); + + let tpu_client = TpuClient::new( + rpc_client.clone(), + &test_validator.rpc_pubsub_url(), + TpuClientConfig::default(), + ) + .unwrap(); + + let recent_blockhash = rpc_client.get_recent_blockhash().unwrap().0; + let tx = + system_transaction::transfer(&mint_keypair, &Pubkey::new_unique(), 42, recent_blockhash); + assert!(tpu_client.send_transaction(&tx)); + + let timeout = Duration::from_secs(5); + let now = Instant::now(); + let signatures = vec![tx.signatures[0]]; + loop { + assert!(now.elapsed() < timeout); + let statuses = rpc_client.get_signature_statuses(&signatures).unwrap(); + if statuses.value.get(0).is_some() { + return; + } + } } diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 4e58a87866..2f85d27f15 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -11,25 +11,21 @@ macro_rules! 
DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS { const CLUSTER_TYPE: ClusterType = ClusterType::$y; #[test] - #[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_bank_forks_status_cache_snapshot_n() { run_test_bank_forks_status_cache_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE) } #[test] - #[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_bank_forks_snapshot_n() { run_test_bank_forks_snapshot_n(SNAPSHOT_VERSION, CLUSTER_TYPE) } #[test] - #[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_concurrent_snapshot_packaging() { run_test_concurrent_snapshot_packaging(SNAPSHOT_VERSION, CLUSTER_TYPE) } #[test] - #[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_slots_to_snapshot() { run_test_slots_to_snapshot(SNAPSHOT_VERSION, CLUSTER_TYPE) } @@ -51,6 +47,7 @@ mod tests { use solana_runtime::{ accounts_background_service::{AbsRequestSender, SnapshotRequestHandler}, accounts_db, + accounts_index::AccountSecondaryIndexes, bank::{Bank, BankSlotDelta}, bank_forks::{ArchiveFormat, BankForks, SnapshotConfig}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, @@ -69,7 +66,7 @@ mod tests { use std::{ collections::HashSet, fs, - path::{Path, PathBuf}, + path::PathBuf, sync::{ atomic::{AtomicBool, Ordering}, mpsc::channel, @@ -79,18 +76,12 @@ mod tests { }; use tempfile::TempDir; - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_3_0, Development, V1_3_0_Development); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_3_0, Devnet, V1_3_0_Devnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_3_0, Testnet, V1_3_0_Testnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_3_0, MainnetBeta, V1_3_0_MainnetBeta); + DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Development, V1_2_0_Development); + DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Devnet, V1_2_0_Devnet); + DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, Testnet, 
V1_2_0_Testnet); + DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_2_0, MainnetBeta, V1_2_0_MainnetBeta); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_4_0, Development, V1_4_0_Development); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_4_0, Devnet, V1_4_0_Devnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_4_0, Testnet, V1_4_0_Testnet); - DEFINE_SNAPSHOT_VERSION_PARAMETERIZED_TEST_FUNCTIONS!(V1_4_0, MainnetBeta, V1_4_0_MainnetBeta); struct SnapshotTestConfig { - _evm_state_dir: TempDir, - _evm_ledger_path: TempDir, accounts_dir: TempDir, snapshot_dir: TempDir, _snapshot_output_path: TempDir, @@ -105,40 +96,18 @@ mod tests { cluster_type: ClusterType, snapshot_interval_slots: u64, ) -> SnapshotTestConfig { - let evm_state_dir = TempDir::new().unwrap(); - let evm_state_json = TempDir::new().unwrap(); - let evm_ledger_path = TempDir::new().unwrap(); let accounts_dir = TempDir::new().unwrap(); let snapshot_dir = TempDir::new().unwrap(); let snapshot_output_path = TempDir::new().unwrap(); let mut genesis_config_info = create_genesis_config(10_000); - let evm_state_json_file = evm_state_json - .path() - .join(solana_sdk::genesis_config::EVM_GENESIS); - let root = solana_sdk::genesis_config::evm_genesis::generate_evm_state_json( - &evm_state_json_file, - ) - .unwrap(); - genesis_config_info.genesis_config.evm_root_hash = root; - genesis_config_info - .genesis_config - .generate_evm_state(evm_ledger_path.path(), Some(&evm_state_json_file)) - .unwrap(); genesis_config_info.genesis_config.cluster_type = cluster_type; let bank0 = Bank::new_with_paths( &genesis_config_info.genesis_config, - Some(( - evm_state_dir.as_ref(), - evm_ledger_path - .path() - .join(solana_sdk::genesis_config::EVM_GENESIS) - .as_ref(), - )), vec![accounts_dir.path().to_path_buf()], &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); bank0.freeze(); @@ -154,8 +123,6 @@ mod tests { }; 
bank_forks.set_snapshot_config(Some(snapshot_config.clone())); SnapshotTestConfig { - _evm_state_dir: evm_state_dir, - _evm_ledger_path: evm_ledger_path, accounts_dir, snapshot_dir, _snapshot_output_path: snapshot_output_path, @@ -170,7 +137,6 @@ mod tests { old_bank_forks: &BankForks, old_last_slot: Slot, old_genesis_config: &GenesisConfig, - evm_state_path: &Path, account_paths: &[PathBuf], ) { let (snapshot_path, snapshot_package_output_path) = old_bank_forks @@ -182,7 +148,6 @@ mod tests { let old_last_bank = old_bank_forks.get(old_last_slot).unwrap(); let deserialized_bank = snapshot_utils::bank_from_archive( - evm_state_path, &account_paths, &[], &old_bank_forks @@ -199,7 +164,7 @@ mod tests { old_genesis_config, None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ) .unwrap(); @@ -286,16 +251,9 @@ mod tests { snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap(); // Restore bank from snapshot - let evm_state_path = TempDir::new().unwrap(); let account_paths = &[snapshot_test_config.accounts_dir.path().to_path_buf()]; let genesis_config = &snapshot_test_config.genesis_config_info.genesis_config; - restore_from_snapshot( - bank_forks, - last_slot, - genesis_config, - evm_state_path.path(), - account_paths, - ); + restore_from_snapshot(bank_forks, last_slot, genesis_config, account_paths); } fn run_test_bank_forks_snapshot_n( diff --git a/crate-features/Cargo.toml b/crate-features/Cargo.toml index 04d73caf83..a41b091c51 100644 --- a/crate-features/Cargo.toml +++ b/crate-features/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-crate-features" -version = "1.5.19" +version = "1.6.14" description = "Solana Crate Features" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,9 +19,9 @@ lazy_static = { version = "1.4.0", features = ["spin", "spin_no_std"] } libc = { version = "0.2.62", features = ["extra_traits"] } rand_chacha = { version = "0.2.2" } regex-syntax = { version = "0.6.12" 
} -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } serde = { version = "1.0.100", features = ["rc"] } -ed25519-dalek = { version = "=1.0.0-pre.4", features = ["serde"] } +ed25519-dalek = { version = "=1.0.1", features = ["serde"] } syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] } syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] } tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] } diff --git a/dos/Cargo.toml b/dos/Cargo.toml index d85a82328f..7fd3a04b1e 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-dos" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,16 +13,16 @@ bincode = "1.3.1" clap = "2.33.1" log = "0.4.11" rand = "0.7.0" -rayon = "1.4.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-client = { path = "../client", version = "=1.5.19" } +rayon = "1.5.0" +solana-clap-utils = { path = "../clap-utils", 
version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/dos/src/main.rs b/dos/src/main.rs index e2efce12d9..9d5c78b519 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -10,7 +10,7 @@ use solana_sdk::pubkey::Pubkey; use std::net::{SocketAddr, UdpSocket}; use std::process::exit; use std::str::FromStr; -use std::time::Instant; +use std::time::{Duration, Instant}; fn run_dos( nodes: &[ContactInfo], @@ -218,14 +218,14 @@ fn main() { if !skip_gossip { info!("Finding cluster entry: {:?}", entrypoint_addr); let (gossip_nodes, _validators) = discover( - None, + None, // keypair Some(&entrypoint_addr), - None, - Some(60), - None, - Some(&entrypoint_addr), - None, - 0, + None, // num_nodes + Duration::from_secs(60), // timeout + None, // find_node_by_pubkey + Some(&entrypoint_addr), // find_node_by_gossip_addr + None, // my_gossip_addr + 0, // my_shred_version ) .unwrap_or_else(|err| { eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err); diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index f7c548b667..c438fde6eb 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-download-utils" -version = "1.5.19" +version = "1.6.14" description = "Solana Download Utils" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,9 +14,9 @@ bzip2 = "0.3.3" console = "0.11.3" indicatif = "0.15.0" log = "0.4.11" -reqwest = { version = 
"0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } tar = "0.4.28" [lib] diff --git a/evm-utils/evm-bridge/Cargo.toml b/evm-utils/evm-bridge/Cargo.toml index 872691c205..94382fba7c 100644 --- a/evm-utils/evm-bridge/Cargo.toml +++ b/evm-utils/evm-bridge/Cargo.toml @@ -4,8 +4,6 @@ version = "0.1.0" authors = ["Vladimir Motylenko "] edition = "2018" -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [dependencies] evm-rpc = { path = "../evm-rpc" } solana-sdk = { path = "../../sdk", version = "1.4.0" } @@ -28,12 +26,13 @@ primitive-types = "0.8.0" secp256k1 = { version = "0.19.0", features = ["recovery", "global-context"] } evm-state = { path = "../evm-state" } log = "0.4.11" -jsonrpc-core = "15.0.0" -jsonrpc-core-client = { version = "15.0.0", features = ["ws"] } -jsonrpc-derive = "15.0.0" -jsonrpc-http-server = "15.0.0" -jsonrpc-pubsub = "15.0.0" -jsonrpc-ws-server = "15.0.0" +jsonrpc-core = "17.1.0" +jsonrpc-core-client = { version = "17.1.0", features = ["ws"] } +jsonrpc-derive = "17.1.0" +jsonrpc-http-server = "17.1.0" +jsonrpc-pubsub = "17.0.0" +jsonrpc-ws-server = "17.0.0" num_cpus = "1.13.0" snafu = "0.6" anyhow = "1.0" +futures-util = "0.3.15" diff --git a/evm-utils/evm-bridge/src/main.rs b/evm-utils/evm-bridge/src/main.rs index c4831efb1d..2d01dd9a8d 100644 --- a/evm-utils/evm-bridge/src/main.rs +++ b/evm-utils/evm-bridge/src/main.rs @@ -1,5 +1,6 @@ use log::*; +use std::future::Future; use std::str::FromStr; use std::sync::Arc; use std::thread::sleep; @@ -17,11 +18,13 @@ use evm_rpc::*; use evm_state::*; use sha3::{Digest, Keccak256}; +use 
futures_util::future::Either; use jsonrpc_core::Result; use serde_json::json; use snafu::ResultExt; use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount}; +use solana_core::rpc::{self, OptionalContext}; use solana_evm_loader_program::{scope::*, tx_chunks::TxChunks}; use solana_sdk::{ clock::DEFAULT_TICKS_PER_SECOND, commitment_config::CommitmentLevel, @@ -53,8 +56,6 @@ use solana_client::{ rpc_response::*, }; -use solana_core::rpc::RpcSol; - use std::result::Result as StdResult; type EvmResult = StdResult; type FutureEvmResult = EvmResult; @@ -795,6 +796,7 @@ fn from_client_error(client_error: ClientError) -> evm_rpc::Error { InstructionError::InvalidArgument, )), logs: Some(logs), + .. }, ) if !logs.is_empty() => { let last_log = logs.last().unwrap(); @@ -840,7 +842,81 @@ macro_rules! proxy_sol_rpc { } pub struct RpcSolProxy; -impl RpcSol for RpcSolProxy { + +impl rpc::rpc_minimal::Minimal for RpcSolProxy { + type Metadata = Arc; + + fn get_balance( + &self, + meta: Self::Metadata, + pubkey_str: String, + commitment: Option, + ) -> Result> { + proxy_sol_rpc!(meta.rpc_client, GetBalance, pubkey_str, commitment) + } + + fn get_epoch_info( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetEpochInfo, commitment) + } + + fn get_health(&self, meta: Self::Metadata) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetHealth) + } + + fn get_identity(&self, meta: Self::Metadata) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetMinimumBalanceForRentExemption) + } + + fn get_slot(&self, meta: Self::Metadata, commitment: Option) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetSlot, commitment) + } + + fn get_block_height( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetBlockHeight, commitment) + } + + fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetSnapshotSlot) + } + fn get_transaction_count( + 
&self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetTransactionCount, commitment) + } + + fn get_version(&self, meta: Self::Metadata) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetVersion) + } + + fn get_vote_accounts( + &self, + meta: Self::Metadata, + commitment: Option, + ) -> Result { + proxy_sol_rpc!(meta.rpc_client, GetVoteAccounts, commitment) + } + + fn get_leader_schedule( + &self, + meta: Self::Metadata, + options: Option, + config: Option, + ) -> Result> { + proxy_sol_rpc!(meta.rpc_client, GetLeaderSchedule, options, config) + } +} + +impl rpc::rpc_full::Full for RpcSolProxy { type Metadata = Arc; fn confirm_transaction( @@ -889,7 +965,7 @@ impl RpcSol for RpcSolProxy { meta: Self::Metadata, program_id_str: String, config: Option, - ) -> Result> { + ) -> Result>> { proxy_sol_rpc!(meta.rpc_client, GetProgramAccounts, program_id_str, config) } @@ -909,27 +985,10 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, GetEpochSchedule) } - fn get_balance( - &self, - meta: Self::Metadata, - pubkey_str: String, - commitment: Option, - ) -> Result> { - proxy_sol_rpc!(meta.rpc_client, GetBalance, pubkey_str, commitment) - } - fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result> { proxy_sol_rpc!(meta.rpc_client, GetClusterNodes) } - fn get_epoch_info( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetEpochInfo, commitment) - } - fn get_block_commitment( &self, meta: Self::Metadata, @@ -942,15 +1001,6 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, GetGenesisHash) } - fn get_leader_schedule( - &self, - meta: Self::Metadata, - slot: Option, - commitment: Option, - ) -> Result> { - proxy_sol_rpc!(meta.rpc_client, GetLeaderSchedule, slot, commitment) - } - fn get_recent_blockhash( &self, meta: Self::Metadata, @@ -988,33 +1038,19 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, GetFeeRateGovernor) } - fn 
get_signature_confirmation( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result> { - proxy_sol_rpc!( - meta.rpc_client, - GetSignatureConfirmation, - signature_str, - commitment - ) - } - - fn get_signature_status( - &self, - meta: Self::Metadata, - signature_str: String, - commitment: Option, - ) -> Result>> { - proxy_sol_rpc!( - meta.rpc_client, - GetSignatureStatus, - signature_str, - commitment - ) - } + // fn get_signature_confirmation( + // &self, + // meta: Self::Metadata, + // signature_str: String, + // commitment: Option, + // ) -> Result> { + // proxy_sol_rpc!( + // meta.rpc_client, + // GetSignatureConfirmation, + // signature_str, + // commitment + // ) + // } fn get_signature_statuses( &self, @@ -1030,18 +1066,6 @@ impl RpcSol for RpcSolProxy { ) } - fn get_slot(&self, meta: Self::Metadata, commitment: Option) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetSlot, commitment) - } - - fn get_transaction_count( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetTransactionCount, commitment) - } - fn get_total_supply( &self, meta: Self::Metadata, @@ -1121,30 +1145,6 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, MinimumLedgerSlot) } - fn get_vote_accounts( - &self, - meta: Self::Metadata, - commitment: Option, - ) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetVoteAccounts, commitment) - } - - fn validator_exit(&self, meta: Self::Metadata) -> Result { - proxy_sol_rpc!(meta.rpc_client, ValidatorExit) - } - - fn get_identity(&self, meta: Self::Metadata) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetMinimumBalanceForRentExemption) - } - - fn get_version(&self, meta: Self::Metadata) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetVersion) - } - - fn set_log_filter(&self, meta: Self::Metadata, filter: String) -> Result<()> { - proxy_sol_rpc!(meta.rpc_client, SetLogFilter, filter) - } - fn get_confirmed_block( &self, meta: Self::Metadata, 
@@ -1231,6 +1231,14 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, GetStakeActivation, pubkey_str, config) } + fn get_block_production( + &self, + meta: Self::Metadata, + config: Option, + ) -> Result> { + proxy_sol_rpc!(meta.rpc_client, GetBlockProduction, config) + } + fn get_token_account_balance( &self, meta: Self::Metadata, @@ -1353,14 +1361,6 @@ impl RpcSol for RpcSolProxy { proxy_sol_rpc!(meta.rpc_client, GetSlotLeaders, start_slot, end_slot) } - fn get_health(&self, meta: Self::Metadata) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetHealth) - } - - fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result { - proxy_sol_rpc!(meta.rpc_client, GetSnapshotSlot) - } - fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result { proxy_sol_rpc!(meta.rpc_client, GetMaxRetransmitSlot) } @@ -1396,18 +1396,13 @@ struct LoggingMiddleware; impl Middleware for LoggingMiddleware { type Future = NoopFuture; type CallFuture = NoopCallFuture; - fn on_call( - &self, - call: Call, - meta: M, - next: F, - ) -> futures::future::Either + fn on_call(&self, call: Call, meta: M, next: F) -> Either where F: Fn(Call, M) -> X + Send + Sync, - X: futures::Future> + Send + 'static, + X: Future> + Send + 'static, { debug!(target: "jsonrpc_core", "On Request = {:?}", call); - futures::future::Either::B(next(call, meta)) + Either::Right(next(call, meta)) } } @@ -1430,8 +1425,11 @@ fn main(args: Args) -> std::result::Result<(), Box> { let meta = Arc::new(meta); let mut io = MetaIoHandler::with_middleware(LoggingMiddleware); - let sol_rpc = RpcSolProxy; - io.extend_with(sol_rpc.to_delegate()); + { + use rpc::rpc_full::Full; + let sol_rpc = RpcSolProxy; + io.extend_with(sol_rpc.to_delegate()); + } let ether_bridge = BridgeErpcImpl; io.extend_with(ether_bridge.to_delegate()); let ether_basic = BasicErpcProxy; diff --git a/evm-utils/evm-rpc/Cargo.toml b/evm-utils/evm-rpc/Cargo.toml index 88c7fdbecc..19e3b4a707 100644 --- a/evm-utils/evm-rpc/Cargo.toml +++ 
b/evm-utils/evm-rpc/Cargo.toml @@ -7,11 +7,11 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -jsonrpc-core = "15.0.0" -jsonrpc-core-client = { version = "15.0.0", features = ["ws"] } -jsonrpc-derive = "15.0.0" -jsonrpc-http-server = "15.0.0" -jsonrpc-pubsub = "15.0.0" +jsonrpc-core = "17.1.0" +jsonrpc-core-client = { version = "17.1.0", features = ["ws"] } +jsonrpc-derive = "17.1.0" +jsonrpc-http-server = "17.1.0" +jsonrpc-pubsub = "17.1.0" serde = "1.0.112" serde_json = "1.0.54" primitive-types = "0.8.0" diff --git a/evm-utils/evm-state/Cargo.toml b/evm-utils/evm-state/Cargo.toml index bd534b4f01..e106bce3d5 100644 --- a/evm-utils/evm-state/Cargo.toml +++ b/evm-utils/evm-state/Cargo.toml @@ -9,7 +9,7 @@ evm = { git = "https://github.com/velas/evm", branch = "evm-estimate-dontrecord- secp256k1 = { version = "0.19.0", features = ["recovery", "global-context"] } # force rand version, because 0.6.5 break compatibility with secp256k1 rand2 = { version = "=0.6.1", package = "rand" } -rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb", rev = "39b877b", default-features = false } +rocksdb = { version = "0.16.0", default-features = false } triedb = { git = "https://github.com/velas/triedb", branch = "chore/bump-rocksdb", features = ["rocksdb"] } primitive-types = "0.8.0" diff --git a/evm-utils/evm-state/src/storage.rs b/evm-utils/evm-state/src/storage.rs index 434e88abe5..da7de0e17d 100644 --- a/evm-utils/evm-state/src/storage.rs +++ b/evm-utils/evm-state/src/storage.rs @@ -228,7 +228,7 @@ impl Storage { let key_bytes = rlp::encode(&key); self.db - .get_pinned_cf(&cf, key_bytes) + .get_pinned_cf(cf, key_bytes) .expect("Error on reading mapped column") .map(|slice| { CODER @@ -242,7 +242,7 @@ impl Storage { let key_bytes = rlp::encode(&key); let value_bytes = CODER.serialize(&value).expect("Unable to serialize value"); self.db - .put_cf(&cf, key_bytes, value_bytes) + 
.put_cf(cf, key_bytes, value_bytes) .expect("Error when put value into database"); } diff --git a/evm-utils/evm-state/src/transactions.rs b/evm-utils/evm-state/src/transactions.rs index eef17742d8..40d5c24f4d 100644 --- a/evm-utils/evm-state/src/transactions.rs +++ b/evm-utils/evm-state/src/transactions.rs @@ -539,7 +539,7 @@ mod test { test_vector("f864808504a817c800825208943535353535353535353535353535353535353535808025a0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116da0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d", "0xb1e2188bc490908a78184e4818dca53684167507417fdb4c09c2d64d32a9896a"); test_vector("f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", "0x588df025c4c2d757d3e314bd3dfbfe352687324e6b8557ad1731585e96928aed"); } - + // #[test] fn test_of_generic_tx() { diff --git a/evm-utils/programs/evm_loader/src/account_structure.rs b/evm-utils/programs/evm_loader/src/account_structure.rs index 2bd33584d1..bf232feb46 100644 --- a/evm-utils/programs/evm_loader/src/account_structure.rs +++ b/evm-utils/programs/evm_loader/src/account_structure.rs @@ -1,6 +1,6 @@ use solana_sdk::{keyed_account::KeyedAccount, pubkey::Pubkey}; -/// Helper structure that wrap all solana accounts, that is needed for evm loader. +/// Helper structure that wrap all solana accounts, that is needed for evm loader.l47 /// It will restrict and provide access to needed solana accounts in: /// 1. Instruction handlers (ExecuteTx, SwapToEvm, FreeOwnership) - full access to evm state. /// 2. Builtin contracts (SwapToNative) - Full access to evm state. @@ -13,7 +13,6 @@ pub struct AccountStructure<'a> { pub users: &'a [KeyedAccount<'a>], } - impl<'a> AccountStructure<'a> { /// Create new account structure, from keyed accounts. 
pub fn new(evm: &'a KeyedAccount<'a>, users: &'a [KeyedAccount<'a>]) -> AccountStructure<'a> { @@ -36,7 +35,7 @@ impl<'a> AccountStructure<'a> { where F: for<'r> Fn(AccountStructure<'r>) -> U, { - use solana_sdk::account::Account; + use solana_sdk::account::AccountSharedData; use std::cell::RefCell; let evm_key = Pubkey::new_unique(); @@ -45,7 +44,7 @@ impl<'a> AccountStructure<'a> { let keys: Vec<_> = std::iter::repeat_with(|| { let user_key = Pubkey::new_unique(); - let user_account = RefCell::new(Account { + let user_account = RefCell::new(AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, diff --git a/evm-utils/programs/evm_loader/src/lib.rs b/evm-utils/programs/evm_loader/src/lib.rs index 021ec082d5..6260d0a294 100644 --- a/evm-utils/programs/evm_loader/src/lib.rs +++ b/evm-utils/programs/evm_loader/src/lib.rs @@ -175,8 +175,8 @@ pub fn transfer_native_to_eth_ixs( } /// Create an account that represent evm locked lamports count. -pub fn create_state_account(lamports: u64) -> solana_sdk::account::Account { - solana_sdk::account::Account { +pub fn create_state_account(lamports: u64) -> solana_sdk::account::AccountSharedData { + solana_sdk::account::AccountSharedData { lamports: lamports + 1, owner: crate::ID, data: b"Evm state".to_vec(), diff --git a/evm-utils/programs/evm_loader/src/processor.rs b/evm-utils/programs/evm_loader/src/processor.rs index aab5e1c10a..e9db6439ff 100644 --- a/evm-utils/programs/evm_loader/src/processor.rs +++ b/evm-utils/programs/evm_loader/src/processor.rs @@ -586,7 +586,7 @@ mod test { let mut executor = Some(&mut executor); let processor = EvmProcessor::default(); let user_id = Pubkey::new_unique(); - let first_user_account = RefCell::new(solana_sdk::account::Account { + let first_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 0, data: vec![], owner: crate::ID, @@ -885,7 +885,7 @@ mod test { let mut executor_orig = evm_state::Executor::testing(); let mut executor = Some(&mut 
executor_orig); let processor = EvmProcessor::default(); - let user_account = RefCell::new(solana_sdk::account::Account { + let user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, @@ -952,7 +952,7 @@ mod test { let mut executor_orig = evm_state::Executor::testing(); let mut executor = Some(&mut executor_orig); let processor = EvmProcessor::default(); - let first_user_account = RefCell::new(solana_sdk::account::Account { + let first_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, @@ -1005,7 +1005,7 @@ mod test { // Transfer back - let second_user_account = RefCell::new(solana_sdk::account::Account { + let second_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 0, data: vec![], owner: crate::ID, @@ -1079,7 +1079,7 @@ mod test { let mut executor_orig = evm_state::Executor::testing(); let mut executor = Some(&mut executor_orig); let processor = EvmProcessor::default(); - let first_user_account = RefCell::new(solana_sdk::account::Account { + let first_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, @@ -1135,7 +1135,7 @@ mod test { // Transfer back - let second_user_account = RefCell::new(solana_sdk::account::Account { + let second_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 0, data: vec![], owner: crate::ID, @@ -1213,7 +1213,7 @@ mod test { let mut executor_orig = evm_state::Executor::testing(); let mut executor = Some(&mut executor_orig); let processor = EvmProcessor::default(); - let first_user_account = RefCell::new(solana_sdk::account::Account { + let first_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, @@ -1269,7 +1269,7 @@ mod test { // Transfer back - let second_user_account = RefCell::new(solana_sdk::account::Account { + let 
second_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 0, data: vec![], owner: crate::ID, @@ -1346,19 +1346,19 @@ mod test { ] } - fn account_by_key(pubkey: solana::Address) -> solana_sdk::account::Account { + fn account_by_key(pubkey: solana::Address) -> solana_sdk::account::AccountSharedData { match &pubkey { id if id == &crate::ID => { - native_loader::create_loadable_account_for_test("Evm Processor") + native_loader::create_loadable_account_for_test("EVM Processor") } - id if id == &solana_sdk::sysvar::rent::id() => solana_sdk::account::Account { + id if id == &solana_sdk::sysvar::rent::id() => solana_sdk::account::AccountSharedData { lamports: 10, owner: native_loader::id(), data: bincode::serialize(&Rent::default()).unwrap(), executable: false, rent_epoch: 0, }, - _rest => solana_sdk::account::Account { + _rest => solana_sdk::account::AccountSharedData { lamports: 20000000, owner: crate::ID, // EVM should only operate with accounts that it owns. data: vec![0u8], @@ -1473,7 +1473,7 @@ mod test { let processor = EvmProcessor::default(); - let first_user_account = RefCell::new(solana_sdk::account::Account { + let first_user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, @@ -1554,7 +1554,7 @@ mod test { let evm_account = RefCell::new(crate::create_state_account(0)); let evm_keyed_account = KeyedAccount::new(&solana::evm_state::ID, false, &evm_account); - let user_account = RefCell::new(solana_sdk::account::Account { + let user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![0; evm_state::MAX_TX_LEN as usize], owner: crate::ID, @@ -1608,7 +1608,7 @@ mod test { let batch_size: u64 = 500; - let user_account = RefCell::new(solana_sdk::account::Account { + let user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![0; batch_size as usize], owner: crate::ID, @@ -1711,7 +1711,7 @@ mod test { 
let evm_account = RefCell::new(crate::create_state_account(0)); let evm_keyed_account = KeyedAccount::new(&solana::evm_state::ID, false, &evm_account); - let user_account = RefCell::new(solana_sdk::account::Account { + let user_account = RefCell::new(solana_sdk::account::AccountSharedData { lamports: 1000, data: vec![], owner: crate::ID, diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 789300b736..5ae568c290 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-faucet" -version = "1.5.19" +version = "1.6.14" description = "Solana Faucet" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,17 +14,17 @@ bincode = "1.3.1" byteorder = "1.3.4" clap = "2.33" log = "0.4.11" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } -spl-memo = { version = "=3.0.0", features = ["no-entrypoint"] } -tokio = { version = "0.3.5", features = ["full"] } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0" +tokio = { version = "1", features = ["full"] } [lib] crate-type = ["lib"] diff --git a/faucet/src/bin/faucet.rs b/faucet/src/bin/faucet.rs index c164e2c9b8..340bcdd974 100644 --- a/faucet/src/bin/faucet.rs +++ 
b/faucet/src/bin/faucet.rs @@ -1,5 +1,5 @@ use { - clap::{crate_description, crate_name, App, Arg}, + clap::{crate_description, crate_name, values_t, App, Arg}, log::*, solana_clap_utils::input_parsers::{lamports_of_sol, value_of}, solana_faucet::{ @@ -8,7 +8,8 @@ use { }, solana_sdk::signature::read_keypair_file, std::{ - net::{Ipv4Addr, SocketAddr}, + collections::HashSet, + net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{Arc, Mutex}, thread, }, @@ -55,6 +56,17 @@ async fn main() { .takes_value(true) .help("Request limit for a single request, in VLX"), ) + .arg( + Arg::with_name("allowed_ip") + .long("allow-ip") + .value_name("IP_ADDRESS") + .takes_value(true) + .multiple(true) + .help( + "Allow requests from a particular IP address without request limit; \ + recipient address will be used to check request limits instead", + ), + ) .get_matches(); let faucet_keypair = read_keypair_file(matches.value_of("keypair").unwrap()) @@ -64,13 +76,19 @@ async fn main() { let per_time_cap = lamports_of_sol(&matches, "per_time_cap"); let per_request_cap = lamports_of_sol(&matches, "per_request_cap"); + let allowed_ips: HashSet<_> = values_t!(matches.values_of("allowed_ip"), IpAddr) + .unwrap_or_default() + .into_iter() + .collect(); + let faucet_addr = socketaddr!(0, FAUCET_PORT); - let faucet = Arc::new(Mutex::new(Faucet::new( + let faucet = Arc::new(Mutex::new(Faucet::new_with_allowed_ips( faucet_keypair, time_slice, per_time_cap, per_request_cap, + allowed_ips, ))); let faucet1 = faucet.clone(); diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs index 06d34ca216..92954448d8 100644 --- a/faucet/src/faucet.rs +++ b/faucet/src/faucet.rs @@ -22,7 +22,7 @@ use { transaction::Transaction, }, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, io::{Read, Write}, net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream}, sync::{mpsc::Sender, Arc, Mutex}, @@ -107,6 +107,7 @@ pub struct Faucet { pub time_slice: Duration, per_time_cap: Option, per_request_cap: Option, + 
allowed_ips: HashSet, } impl Faucet { @@ -115,7 +116,23 @@ impl Faucet { time_input: Option, per_time_cap: Option, per_request_cap: Option, - ) -> Faucet { + ) -> Self { + Self::new_with_allowed_ips( + faucet_keypair, + time_input, + per_time_cap, + per_request_cap, + HashSet::new(), + ) + } + + pub fn new_with_allowed_ips( + faucet_keypair: Keypair, + time_input: Option, + per_time_cap: Option, + per_request_cap: Option, + allowed_ips: HashSet, + ) -> Self { let time_slice = Duration::new(time_input.unwrap_or(TIME_SLICE), 0); if let Some((per_request_cap, per_time_cap)) = per_request_cap.zip(per_time_cap) { if per_time_cap < per_request_cap { @@ -127,13 +144,14 @@ impl Faucet { ); } } - Faucet { + Self { faucet_keypair, ip_cache: HashMap::new(), address_cache: HashMap::new(), time_slice, per_time_cap, per_request_cap, + allowed_ips, } } @@ -206,7 +224,7 @@ impl Faucet { ))); } } - if !ip.is_loopback() { + if !ip.is_loopback() && !self.allowed_ips.contains(&ip) { self.check_time_request_limit(lamports, ip)?; } self.check_time_request_limit(lamports, to)?; @@ -588,6 +606,25 @@ mod tests { let tx1 = faucet.build_airdrop_transaction(request1, ip); assert!(tx1.is_err()); + // Test multiple requests from allowed ip with different addresses succeed + let mint = Keypair::new(); + let ip = socketaddr!([203, 0, 113, 1], 0).ip(); + let mut allowed_ips = HashSet::new(); + allowed_ips.insert(ip); + faucet = Faucet::new_with_allowed_ips(mint, None, Some(2), None, allowed_ips); + let other = Pubkey::new_unique(); + let _tx0 = faucet.build_airdrop_transaction(request, ip).unwrap(); // first request succeeds + let request1 = FaucetRequest::GetAirdrop { + lamports: 2, + to: other, + blockhash, + }; + let _tx1 = faucet.build_airdrop_transaction(request1, ip).unwrap(); // first request succeeds + let tx0 = faucet.build_airdrop_transaction(request, ip); + assert!(tx0.is_err()); + let tx1 = faucet.build_airdrop_transaction(request1, ip); + assert!(tx1.is_err()); + // Test per-request 
cap let mint = Keypair::new(); let mint_pubkey = mint.pubkey(); diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index ddd399124d..8f33799ce4 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi" -version = "1.5.19" +version = "1.6.14" description = "Solana Frozen ABI" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,14 +13,14 @@ edition = "2018" bs58 = "0.3.1" bv = { version = "0.11.1", features = ["serde"] } log = "0.4.11" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" sha2 = "0.9.2" -solana-frozen-abi-macro = { path = "macro", version = "=1.5.19" } +solana-frozen-abi-macro = { path = "macro", version = "=1.6.14" } thiserror = "1.0" [target.'cfg(not(target_arch = "bpf"))'.dependencies] -solana-logger = { path = "../logger", version = "=1.5.19" } +solana-logger = { path = "../logger", version = "=1.6.14" } generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"]} memmap2 = "0.1.0" diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index 8029c47fe4..05be27963e 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi-macro" -version = "1.5.19" +version = "1.6.14" description = "Solana Frozen ABI Macro" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index 2298bc30fb..45305fe281 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -257,7 +257,7 @@ pub fn derive_abi_sample(item: TokenStream) -> TokenStream { fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { let type_name = &input.ident; let mut serialized_variants = quote! 
{}; - let mut variant_count = 0; + let mut variant_count: u64 = 0; let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); for variant in &input.variants { // Don't digest a variant with serde(skip) @@ -265,10 +265,14 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { continue; }; let sample_variant = quote_sample_variant(&type_name, &ty_generics, &variant); - variant_count += 1; + variant_count = if let Some(variant_count) = variant_count.checked_add(1) { + variant_count + } else { + break; + }; serialized_variants.extend(quote! { #sample_variant; - Serialize::serialize(&sample_variant, digester.create_enum_child())?; + Serialize::serialize(&sample_variant, digester.create_enum_child()?)?; }); } @@ -281,7 +285,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { use ::solana_frozen_abi::abi_example::AbiExample; digester.update_with_string(format!("enum {} (variants = {})", enum_name, #variant_count)); #serialized_variants - Ok(digester.create_child()) + digester.create_child() } } }).into() diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index c233a29867..7831a70c94 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -27,6 +27,8 @@ pub enum DigestError { Node(Sstr, Box), #[error("leaf error")] Leaf(Sstr, Sstr, Box), + #[error("arithmetic overflow")] + ArithmeticOverflow, } impl SerdeError for DigestError { @@ -77,22 +79,30 @@ impl AbiDigester { } } - pub fn create_child(&self) -> Self { - Self { + pub fn create_child(&self) -> Result { + let depth = self + .depth + .checked_add(1) + .ok_or(DigestError::ArithmeticOverflow)?; + Ok(Self { data_types: self.data_types.clone(), - depth: self.depth + 1, + depth, for_enum: false, opaque_scope: self.opaque_scope.clone(), - } + }) } - pub fn create_enum_child(&self) -> Self { - Self { + pub fn create_enum_child(&self) -> Result { + let depth = self + .depth + .checked_add(1) + 
.ok_or(DigestError::ArithmeticOverflow)?; + Ok(Self { data_types: self.data_types.clone(), - depth: self.depth + 1, + depth, for_enum: true, opaque_scope: self.opaque_scope.clone(), - } + }) } pub fn digest_data(&mut self, value: &T) -> DigestResult { @@ -120,7 +130,12 @@ impl AbiDigester { }) .collect::>() .join(" "); - buf = format!("{:0width$}{}\n", "", buf, width = self.depth * INDENT_WIDTH); + buf = format!( + "{:0width$}{}\n", + "", + buf, + width = self.depth.saturating_mul(INDENT_WIDTH) + ); info!("updating with: {}", buf.trim_end()); (*self.data_types.borrow_mut()).push(buf); } @@ -141,7 +156,7 @@ impl AbiDigester { fn digest_element(&mut self, v: &T) -> Result<(), DigestError> { self.update_with_type::("element"); - self.create_child().digest_data(v).map(|_| ()) + self.create_child()?.digest_data(v).map(|_| ()) } fn digest_named_field( @@ -150,7 +165,7 @@ impl AbiDigester { v: &T, ) -> Result<(), DigestError> { self.update_with_string(format!("field {}: {}", key, type_name::())); - self.create_child() + self.create_child()? 
.digest_data(v) .map(|_| ()) .map_err(|e| DigestError::wrap_by_str(e, key)) @@ -158,7 +173,7 @@ impl AbiDigester { fn digest_unnamed_field(&mut self, v: &T) -> Result<(), DigestError> { self.update_with_type::("field"); - self.create_child().digest_data(v).map(|_| ()) + self.create_child()?.digest_data(v).map(|_| ()) } #[allow(clippy::unnecessary_wraps)] @@ -293,12 +308,12 @@ impl Serializer for AbiDigester { { // emulate the ABI digest for the Option enum; see TestMyOption self.update(&["enum Option (variants = 2)"]); - let mut variant_digester = self.create_child(); + let mut variant_digester = self.create_child()?; variant_digester.update_with_string("variant(0) None (unit)".to_owned()); variant_digester .update_with_string(format!("variant(1) Some({}) (newtype)", type_name::())); - variant_digester.create_child().digest_data(v) + variant_digester.create_child()?.digest_data(v) } fn serialize_unit_struct(mut self, name: Sstr) -> DigestResult { @@ -317,7 +332,7 @@ impl Serializer for AbiDigester { T: ?Sized + Serialize, { self.update_with_string(format!("struct {}({}) (newtype)", name, type_name::())); - self.create_child() + self.create_child()? .digest_data(v) .map_err(|e| DigestError::wrap_by_str(e, "newtype_struct")) } @@ -339,7 +354,7 @@ impl Serializer for AbiDigester { variant, type_name::() )); - self.create_child() + self.create_child()? 
.digest_data(v) .map_err(|e| DigestError::wrap_by_str(e, "newtype_variant")) } @@ -351,17 +366,17 @@ impl Serializer for AbiDigester { "Exactly 1 seq element is needed to generate the ABI digest precisely" ); self.update_with_string(format!("seq (elements = {})", len)); - Ok(self.create_child()) + self.create_child() } fn serialize_tuple(mut self, len: usize) -> DigestResult { self.update_with_string(format!("tuple (elements = {})", len)); - Ok(self.create_child()) + self.create_child() } fn serialize_tuple_struct(mut self, name: Sstr, len: usize) -> DigestResult { self.update_with_string(format!("struct {} (fields = {}) (tuple)", name, len)); - Ok(self.create_child()) + self.create_child() } fn serialize_tuple_variant( @@ -373,7 +388,7 @@ impl Serializer for AbiDigester { ) -> DigestResult { self.check_for_enum("tuple_variant", variant)?; self.update_with_string(format!("variant({}) {} (fields = {})", i, variant, len)); - Ok(self.create_child()) + self.create_child() } fn serialize_map(mut self, len: Option) -> DigestResult { @@ -383,12 +398,12 @@ impl Serializer for AbiDigester { "Exactly 1 map entry is needed to generate the ABI digest precisely" ); self.update_with_string(format!("map (entries = {})", len)); - Ok(self.create_child()) + self.create_child() } fn serialize_struct(mut self, name: Sstr, len: usize) -> DigestResult { self.update_with_string(format!("struct {} (fields = {})", name, len)); - Ok(self.create_child()) + self.create_child() } fn serialize_struct_variant( @@ -403,7 +418,7 @@ impl Serializer for AbiDigester { "variant({}) struct {} (fields = {})", i, variant, len )); - Ok(self.create_child()) + self.create_child() } } @@ -464,12 +479,12 @@ impl SerializeMap for AbiDigester { fn serialize_key(&mut self, key: &T) -> Result<(), DigestError> { self.update_with_type::("key"); - self.create_child().digest_data(key).map(|_| ()) + self.create_child()?.digest_data(key).map(|_| ()) } fn serialize_value(&mut self, value: &T) -> Result<(), DigestError> 
{ self.update_with_type::("value"); - self.create_child().digest_data(value).map(|_| ()) + self.create_child()?.digest_data(value).map(|_| ()) } fn end(self) -> DigestResult { diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index fdb858aa5c..ebb74e31ca 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -512,11 +512,11 @@ impl AbiEnumVisitor for Result { digester.update(&["enum Result (variants = 2)"]); let variant: Self = Result::Ok(O::example()); - variant.serialize(digester.create_enum_child())?; + variant.serialize(digester.create_enum_child()?)?; let variant: Self = Result::Err(E::example()); - variant.serialize(digester.create_enum_child())?; + variant.serialize(digester.create_enum_child()?)?; - Ok(digester.create_child()) + digester.create_child() } } diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index 651cc13144..9f248bc702 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-genesis" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,24 +13,26 @@ documentation = "https://docs.rs/solana-genesis" base64 = "0.12.3" clap = "2.33.1" chrono = "0.4" -serde = "1.0.118" +serde = "1.0.122" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-budget-program = { path = "../programs/budget", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-exchange-program = { path = "../programs/exchange", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } 
-solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-vest-program = { path = "../programs/vest", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-budget-program = { path = "../programs/budget", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-exchange-program = { path = "../programs/exchange", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vest-program = { path = "../programs/vest", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } + solana-evm-loader-program = { path = "../evm-utils/programs/evm_loader" } evm-state = { path = "../evm-utils/evm-state" } evm-rpc = { path = "../evm-utils/evm-rpc" } + tempfile = "3.1.0" once_cell = "1.7.2" log = "0.4" diff --git a/genesis/src/main.rs b/genesis/src/main.rs index d38b32ef40..9287881490 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -13,7 +13,7 @@ use evm_state::U256; use log::{error, info}; use solana_clap_utils::{ input_parsers::{cluster_type_of, pubkey_of, pubkeys_of, unix_timestamp_from_rfc3339_datetime}, - input_validators::{is_pubkey_or_keypair, is_rfc3339_datetime, is_valid_percentage}, + input_validators::{is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_valid_percentage}, }; use solana_genesis::Base64Account; use solana_ledger::{ @@ -21,7 +21,7 @@ use solana_ledger::{ }; use solana_runtime::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE; use 
solana_sdk::{ - account::Account, + account::{Account, AccountSharedData}, clock, epoch_schedule::EpochSchedule, fee_calculator::FeeRateGovernor, @@ -85,14 +85,16 @@ pub fn load_genesis_accounts(file: &str, genesis_config: &mut GenesisConfig) -> ) })?; - let mut account = Account::new(account_details.balance, 0, &owner_program_id); + let mut account = AccountSharedData::new(account_details.balance, 0, &owner_program_id); if account_details.data != "~" { - account.data = base64::decode(account_details.data.as_str()).map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!("Invalid account data: {}: {:?}", account_details.data, err), - ) - })?; + account.set_data( + base64::decode(account_details.data.as_str()).map_err(|err| { + io::Error::new( + io::ErrorKind::Other, + format!("Invalid account data: {}: {:?}", account_details.data, err), + ) + })?, + ); } account.executable = account_details.executable; lamports += account.lamports; @@ -329,6 +331,7 @@ fn main() -> Result<(), Box> { Arg::with_name("slots_per_epoch") .long("slots-per-epoch") .value_name("SLOTS") + .validator(is_slot) .takes_value(true) .help("The number of slots in an epoch"), ) @@ -592,7 +595,7 @@ fn main() -> Result<(), Box> { genesis_config.add_account( *identity_pubkey, - Account::new(bootstrap_validator_lamports, 0, &system_program::id()), + AccountSharedData::new(bootstrap_validator_lamports, 0, &system_program::id()), ); let vote_account = vote_state::create_account_with_authorized( @@ -626,7 +629,7 @@ fn main() -> Result<(), Box> { if let Some(faucet_pubkey) = faucet_pubkey { genesis_config.add_account( faucet_pubkey, - Account::new(faucet_lamports, 0, &system_program::id()), + AccountSharedData::new(faucet_lamports, 0, &system_program::id()), ); } @@ -705,13 +708,13 @@ fn main() -> Result<(), Box> { }); genesis_config.add_account( address, - Account { + AccountSharedData::from(Account { lamports: genesis_config.rent.minimum_balance(program_data.len()), data: program_data, 
executable: true, owner: loader, rent_epoch: 0, - }, + }), ); } _ => unreachable!(), diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 835980a5b2..321e3b57cb 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "velas-gossip" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,13 +11,12 @@ documentation = "https://docs.rs/solana-gossip" [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/gossip/src/main.rs b/gossip/src/main.rs index 716334dd2d..7f98c475e9 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -8,7 +8,6 @@ use solana_clap_utils::{ input_parsers::keypair_of, input_validators::{is_keypair_or_ask_keyword, is_port, is_pubkey}, }; -use solana_client::rpc_client::RpcClient; use solana_core::{contact_info::ContactInfo, gossip_service::discover}; use solana_sdk::pubkey::Pubkey; use std::{ @@ -16,6 +15,7 @@ use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, process::exit, sync::Arc, + time::Duration, 
}; fn parse_matches() -> ArgMatches<'static> { @@ -141,29 +141,6 @@ fn parse_matches() -> ArgMatches<'static> { .help("Maximum time to wait in seconds [default: wait forever]"), ), ) - .subcommand( - SubCommand::with_name("stop") - .about("Send stop request to a node") - .setting(AppSettings::DisableVersion) - .arg( - Arg::with_name("entrypoint") - .short("n") - .long("entrypoint") - .value_name("HOST:PORT") - .takes_value(true) - .required(true) - .validator(solana_net_utils::is_host_port) - .help("Rendezvous with the cluster at this entry point"), - ) - .arg( - Arg::with_name("node_pubkey") - .index(1) - .required(true) - .value_name("PUBKEY") - .validator(is_pubkey) - .help("Public key of a specific node to stop"), - ), - ) .get_matches() } @@ -214,7 +191,7 @@ fn process_spy_results( } } if let Some(node) = pubkey { - if validators.iter().find(|x| x.id == node).is_none() { + if !validators.iter().any(|x| x.id == node) { eprintln!("Error: Could not find node {:?}", node); exit(1); } @@ -262,15 +239,15 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> { .expect("unable to find an available gossip port") }), ); - + let discover_timeout = Duration::from_secs(timeout.unwrap_or(u64::MAX)); let (_all_peers, validators) = discover( identity_keypair, entrypoint_addr.as_ref(), num_nodes, - timeout, - pubkey, - None, - Some(&gossip_addr), + discover_timeout, + pubkey, // find_node_by_pubkey + None, // find_node_by_gossip_addr + Some(&gossip_addr), // my_gossip_addr shred_version, )?; @@ -295,13 +272,13 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> { let timeout = value_t_or_exit!(matches, "timeout", u64); let shred_version = value_t_or_exit!(matches, "shred_version", u16); let (_all_peers, validators) = discover( - None, - entrypoint_addr.as_ref(), - Some(1), - Some(timeout), - None, + None, // keypair entrypoint_addr.as_ref(), - None, + Some(1), // num_nodes + Duration::from_secs(timeout), + None, // find_node_by_pubkey + 
entrypoint_addr.as_ref(), // find_node_by_gossip_addr + None, // my_gossip_addr shred_version, )?; @@ -332,44 +309,6 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> { Ok(()) } -fn process_stop(matches: &ArgMatches) -> Result<(), Box> { - let entrypoint_addr = parse_entrypoint(&matches); - let pubkey = matches - .value_of("node_pubkey") - .unwrap() - .parse::() - .unwrap(); - let (_all_peers, validators) = discover( - None, - entrypoint_addr.as_ref(), - None, - None, - Some(pubkey), - None, - None, - 0, - )?; - let validator = validators.iter().find(|x| x.id == pubkey).unwrap(); - - if !ContactInfo::is_valid_address(&validator.rpc) { - eprintln!( - "Error: RPC service is not enabled on validator {:?}", - pubkey - ); - exit(1); - } - println!("\nSending stop request to validator {:?}", pubkey); - - let result = RpcClient::new_socket(validator.rpc).validator_exit()?; - if result { - println!("Stop signal accepted"); - } else { - eprintln!("Error: Stop signal ignored"); - } - - Ok(()) -} - fn main() -> Result<(), Box> { solana_logger::setup_with_default("solana=info"); @@ -382,9 +321,6 @@ fn main() -> Result<(), Box> { ("rpc-url", Some(matches)) => { process_rpc_url(matches)?; } - ("stop", Some(matches)) => { - process_stop(matches)?; - } _ => unreachable!(), } diff --git a/install/Cargo.toml b/install/Cargo.toml index da08ae5c06..b6a4b2f18b 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -1,10 +1,8 @@ [package] -authors = ["Solana Maintainers "] edition = "2018" name = "velas-install" -description = "The velas cluster software installer" -version = "1.5.19" -repository = "https://github.com/solana-labs/solana" +description = "The Velas cluster software installer" +version = "1.6.14" license = "Apache-2.0" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-install" @@ -21,15 +19,15 @@ dirs-next = "2.0.0" indicatif = "0.15.0" lazy_static = "1.4.0" nix = "0.19.0" -reqwest = { version = "0.10.8", default-features = 
false, features = ["blocking", "rustls-tls", "json"] } -serde = { version = "1.0.118", features = ["derive"] } -serde_json = "1.0.56" +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +serde = { version = "1.0.122", features = ["derive"] } +serde_json = "1.0.62" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-config-program = { path = "../programs/config", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-config-program = { path = "../programs/config", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } solana-version = { path = "../version" } semver = "0.9.0" tar = "0.4.28" diff --git a/install/src/command.rs b/install/src/command.rs index 8122afbe96..40e8ce1f10 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -1054,7 +1054,9 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res print_update_manifest(&update_manifest); if timestamp_secs() - < str::parse::(crate::build_env::BUILD_SECONDS_SINCE_UNIX_EPOCH).unwrap() + < crate::build_env::BUILD_SECONDS_SINCE_UNIX_EPOCH + .parse::() + .unwrap() { return Err("Unable to update as system time seems unreliable".to_string()); } diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 10c9eefc72..59389b0a70 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "velas-keygen" -version = "1.5.19" +name = "solana-keygen" +version = "1.6.14" description = "Solana key generation utility" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ 
-14,11 +14,11 @@ bs58 = "0.3.1" clap = "2.33" dirs-next = "2.0.0" num_cpus = "1.13.0" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } tiny-bip39 = "0.7.0" [[bin]] diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 72932333e1..431909e465 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -1,15 +1,16 @@ #![allow(clippy::integer_arithmetic)] use bip39::{Language, Mnemonic, MnemonicType, Seed}; use clap::{ - crate_description, crate_name, value_t, values_t_or_exit, App, AppSettings, Arg, ArgMatches, - SubCommand, + crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, + Arg, ArgMatches, SubCommand, }; use solana_clap_utils::{ + input_validators::{is_parsable, is_prompt_signer_source}, keypair::{ - keypair_from_seed_phrase, prompt_passphrase, signer_from_path, + keypair_from_path, keypair_from_seed_phrase, prompt_passphrase, signer_from_path, SKIP_SEED_PHRASE_VALIDATION_ARG, }, - DisplayError, + ArgConstant, DisplayError, }; use solana_cli_config::{Config, CONFIG_FILE}; use solana_remote_wallet::remote_wallet::RemoteWalletManager; @@ -40,6 +41,86 @@ struct GrindMatch { count: AtomicU64, } +const WORD_COUNT_ARG: ArgConstant<'static> = ArgConstant { + long: "word-count", + name: "word_count", + help: "Specify the number of words that will be present in the generated seed phrase", +}; + +const LANGUAGE_ARG: ArgConstant<'static> 
= ArgConstant { + long: "language", + name: "language", + help: "Specify the mnemonic lanaguage that will be present in the generated seed phrase", +}; + +const NO_PASSPHRASE_ARG: ArgConstant<'static> = ArgConstant { + long: "no-bip39-passphrase", + name: "no_passphrase", + help: "Do not prompt for a BIP39 passphrase", +}; + +const NO_OUTFILE_ARG: ArgConstant<'static> = ArgConstant { + long: "no-outfile", + name: "no_outfile", + help: "Only print a seed phrase and pubkey. Do not output a keypair file", +}; + +fn word_count_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(WORD_COUNT_ARG.name) + .long(WORD_COUNT_ARG.long) + .possible_values(&["12", "15", "18", "21", "24"]) + .default_value("12") + .value_name("NUMBER") + .takes_value(true) + .help(WORD_COUNT_ARG.help) +} + +fn language_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(LANGUAGE_ARG.name) + .long(LANGUAGE_ARG.long) + .possible_values(&[ + "english", + "chinese-simplified", + "chinese-traditional", + "japanese", + "spanish", + "korean", + "french", + "italian", + ]) + .default_value("english") + .value_name("LANGUAGE") + .takes_value(true) + .help(LANGUAGE_ARG.help) +} + +fn no_passphrase_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(NO_PASSPHRASE_ARG.name) + .long(NO_PASSPHRASE_ARG.long) + .alias("no-passphrase") + .help(NO_PASSPHRASE_ARG.help) +} + +fn no_outfile_arg<'a, 'b>() -> Arg<'a, 'b> { + Arg::with_name(NO_OUTFILE_ARG.name) + .long(NO_OUTFILE_ARG.long) + .conflicts_with_all(&["outfile", "silent"]) + .help(NO_OUTFILE_ARG.help) +} + +trait KeyGenerationCommonArgs { + fn key_generation_common_args(self) -> Self; +} + +impl KeyGenerationCommonArgs for App<'_, '_> { + fn key_generation_common_args(self) -> Self { + self.arg(word_count_arg()) + .arg(language_arg()) + .arg(no_passphrase_arg()) + .arg(no_outfile_arg()) + } +} + fn check_for_overwrite(outfile: &str, matches: &ArgMatches) { let force = matches.is_present("force"); if !force && Path::new(outfile).exists() { @@ -130,8 +211,47 @@ fn 
grind_validator_starts_and_ends_with(v: String) -> Result<(), String> { Ok(()) } -fn grind_print_info(grind_matches: &[GrindMatch]) { - println!("Searching with {} threads for:", num_cpus::get()); +fn acquire_language(matches: &ArgMatches<'_>) -> Language { + match matches.value_of(LANGUAGE_ARG.name).unwrap() { + "english" => Language::English, + "chinese-simplified" => Language::ChineseSimplified, + "chinese-traditional" => Language::ChineseTraditional, + "japanese" => Language::Japanese, + "spanish" => Language::Spanish, + "korean" => Language::Korean, + "french" => Language::French, + "italian" => Language::Italian, + _ => unreachable!(), + } +} + +fn no_passphrase_and_message() -> (String, String) { + (NO_PASSPHRASE.to_string(), "".to_string()) +} + +fn acquire_passphrase_and_message( + matches: &ArgMatches<'_>, +) -> Result<(String, String), Box> { + if matches.is_present(NO_PASSPHRASE_ARG.name) { + Ok(no_passphrase_and_message()) + } else { + match prompt_passphrase( + "\nFor added security, enter a BIP39 passphrase\n\ + \nNOTE! 
This passphrase improves security of the recovery seed phrase NOT the\n\ + keypair file itself, which is stored as insecure plain text\n\ + \nBIP39 Passphrase (empty for none): ", + ) { + Ok(passphrase) => { + println!(); + Ok((passphrase, " and your BIP39 passphrase".to_string())) + } + Err(e) => Err(e), + } + } +} + +fn grind_print_info(grind_matches: &[GrindMatch], num_threads: usize) { + println!("Searching with {} threads for:", num_threads); for gm in grind_matches { let mut msg = Vec::::new(); if gm.count.load(Ordering::Relaxed) > 1 { @@ -160,6 +280,7 @@ fn grind_parse_args( starts_with_args: HashSet, ends_with_args: HashSet, starts_and_ends_with_args: HashSet, + num_threads: usize, ) -> Vec { let mut grind_matches = Vec::::new(); for sw in starts_with_args { @@ -202,11 +323,12 @@ fn grind_parse_args( count: AtomicU64::new(args[2].parse::().unwrap()), }); } - grind_print_info(&grind_matches); + grind_print_info(&grind_matches, num_threads); grind_matches } fn main() -> Result<(), Box> { + let default_num_threads = num_cpus::get().to_string(); let matches = App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) @@ -262,42 +384,13 @@ fn main() -> Result<(), Box> { .long("force") .help("Overwrite the output file if it exists"), ) - .arg( - Arg::with_name("word_count") - .long("word-count") - .possible_values(&["12", "15", "18", "21", "24"]) - .default_value("12") - .value_name("NUMBER") - .takes_value(true) - .help("Specify the number of words that will be present in the generated seed phrase"), - ) - .arg( - Arg::with_name("language") - .long("language") - .possible_values(&["english", "chinese-simplified", "chinese-traditional", "japanese", "spanish", "korean", "french", "italian"]) - .default_value("english") - .value_name("LANGUAGE") - .takes_value(true) - .help("Specify the mnemonic lanaguage that will be present in the generated seed phrase"), - ) - .arg( - Arg::with_name("no_passphrase") - .long("no-bip39-passphrase") - 
.alias("no-passphrase") - .help("Do not prompt for a BIP39 passphrase"), - ) - .arg( - Arg::with_name("no_outfile") - .long("no-outfile") - .conflicts_with_all(&["outfile", "silent"]) - .help("Only print a seed phrase and pubkey. Do not output a keypair file"), - ) .arg( Arg::with_name("silent") .short("s") .long("silent") .help("Do not display seed phrase. Useful when piping output to other programs that prompt for user input, like gpg"), ) + .key_generation_common_args() ) .subcommand( SubCommand::with_name("grind") @@ -337,7 +430,22 @@ fn main() -> Result<(), Box> { .multiple(true) .validator(grind_validator_starts_and_ends_with) .help("Saves specified number of keypairs whos public key starts and ends with the indicated perfix and suffix\nExample: --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is Base58\nCOUNT type is u64"), - ), + ) + .arg( + Arg::with_name("num_threads") + .long("num-threads") + .value_name("NUMBER") + .takes_value(true) + .validator(is_parsable::) + .default_value(&default_num_threads) + .help("Specify the number of grind threads"), + ) + .arg( + Arg::with_name("use_mnemonic") + .long("use-mnemonic") + .help("Generate using a mnemonic key phrase. 
Expect a significant slowdown in this mode"), + ) + .key_generation_common_args() ) .subcommand( SubCommand::with_name("pubkey") @@ -374,6 +482,14 @@ fn main() -> Result<(), Box> { SubCommand::with_name("recover") .about("Recover keypair from seed phrase and optional BIP39 passphrase") .setting(AppSettings::DisableVersion) + .arg( + Arg::with_name("prompt_signer") + .index(1) + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_prompt_signer_source) + .help("`prompt:` URI scheme or `ASK` keyword"), + ) .arg( Arg::with_name("outfile") .short("o") @@ -426,7 +542,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { let mut path = dirs_next::home_dir().expect("home directory"); let outfile = if matches.is_present("outfile") { matches.value_of("outfile") - } else if matches.is_present("no_outfile") { + } else if matches.is_present(NO_OUTFILE_ARG.name) { None } else { path.extend(&[".config", "velas", "id.json"]); @@ -439,43 +555,16 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { None => (), } - let word_count = value_t!(matches.value_of("word_count"), usize).unwrap(); + let word_count = value_t!(matches.value_of(WORD_COUNT_ARG.name), usize).unwrap(); let mnemonic_type = MnemonicType::for_word_count(word_count)?; - let language = match matches.value_of("language").unwrap() { - "english" => Language::English, - "chinese-simplified" => Language::ChineseSimplified, - "chinese-traditional" => Language::ChineseTraditional, - "japanese" => Language::Japanese, - "spanish" => Language::Spanish, - "korean" => Language::Korean, - "french" => Language::French, - "italian" => Language::Italian, - _ => unreachable!(), - }; + let language = acquire_language(matches); let silent = matches.is_present("silent"); if !silent { println!("Generating a new keypair"); } let mnemonic = Mnemonic::new(mnemonic_type, language); - let passphrase = if matches.is_present("no_passphrase") { - NO_PASSPHRASE.to_string() - } else { - let passphrase = prompt_passphrase( - 
"\nFor added security, enter a BIP39 passphrase\n\ - \nNOTE! This passphrase improves security of the recovery seed phrase NOT the\n\ - keypair file itself, which is stored as insecure plain text\n\ - \nBIP39 Passphrase (empty for none): ", - )?; - println!(); - passphrase - }; - - let passphrase_message = if passphrase == NO_PASSPHRASE { - "".to_string() - } else { - " and your BIP39 passphrase".to_string() - }; + let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches).unwrap(); let seed = Seed::new(&mnemonic, &passphrase); let keypair = keypair_from_seed(seed.as_bytes())?; @@ -507,8 +596,13 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { check_for_overwrite(&outfile, &matches); } - let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); - let keypair = keypair_from_seed_phrase("recover", skip_validation, true)?; + let keypair_name = "recover"; + let keypair = if let Some(path) = matches.value_of("prompt_signer") { + keypair_from_path(matches, path, keypair_name, true)? + } else { + let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); + keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)? 
+ }; output_keypair(&keypair, &outfile, "recovered")?; } ("grind", Some(matches)) => { @@ -549,25 +643,43 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { exit(1); } + let num_threads = value_t_or_exit!(matches.value_of("num_threads"), usize); + let grind_matches = grind_parse_args( ignore_case, starts_with_args, ends_with_args, starts_and_ends_with_args, + num_threads, ); + let use_mnemonic = matches.is_present("use_mnemonic"); + + let word_count = value_t!(matches.value_of(WORD_COUNT_ARG.name), usize).unwrap(); + let mnemonic_type = MnemonicType::for_word_count(word_count)?; + let language = acquire_language(matches); + + let (passphrase, passphrase_message) = if use_mnemonic { + acquire_passphrase_and_message(matches).unwrap() + } else { + no_passphrase_and_message() + }; + let no_outfile = matches.is_present(NO_OUTFILE_ARG.name); + let grind_matches_thread_safe = Arc::new(grind_matches); let attempts = Arc::new(AtomicU64::new(1)); let found = Arc::new(AtomicU64::new(0)); let start = Instant::now(); let done = Arc::new(AtomicBool::new(false)); - let thread_handles: Vec<_> = (0..num_cpus::get()) + let thread_handles: Vec<_> = (0..num_threads) .map(|_| { let done = done.clone(); let attempts = attempts.clone(); let found = found.clone(); let grind_matches_thread_safe = grind_matches_thread_safe.clone(); + let passphrase = passphrase.clone(); + let passphrase_message = passphrase_message.clone(); thread::spawn(move || loop { if done.load(Ordering::Relaxed) { @@ -582,7 +694,13 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { found.load(Ordering::Relaxed), ); } - let keypair = Keypair::new(); + let (keypair, phrase) = if use_mnemonic { + let mnemonic = Mnemonic::new(mnemonic_type, language); + let seed = Seed::new(&mnemonic, &passphrase); + (keypair_from_seed(seed.as_bytes()).unwrap(), mnemonic.phrase().to_string()) + } else { + (Keypair::new(), "".to_string()) + }; let mut pubkey = bs58::encode(keypair.pubkey()).into_string(); if ignore_case 
{ pubkey = pubkey.to_lowercase(); @@ -608,12 +726,24 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { grind_matches_thread_safe[i] .count .fetch_sub(1, Ordering::Relaxed); - println!( - "Wrote keypair to {}", - &format!("{}.json", keypair.pubkey()) - ); - write_keypair_file(&keypair, &format!("{}.json", keypair.pubkey())) + if !no_outfile { + write_keypair_file(&keypair, &format!("{}.json", keypair.pubkey())) .unwrap(); + println!( + "Wrote keypair to {}", + &format!("{}.json", keypair.pubkey()) + ); + } + if use_mnemonic { + let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); + println!( + "{}\nFound matching key {}", + ÷r, keypair.pubkey()); + println!( + "\nSave this seed phrase{} to recover your new keypair:\n{}\n{}", + passphrase_message, phrase, ÷r + ); + } } } if total_matches_found == grind_matches_thread_safe.len() { @@ -630,7 +760,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { ("verify", Some(matches)) => { let keypair = get_keypair_from_matches(matches, config, &mut wallet_manager)?; let simple_message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( Pubkey::default(), &0, vec![AccountMeta::new(keypair.pubkey(), true)], diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 7a18117f61..452cc42212 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-ledger-tool" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -23,22 +23,24 @@ regex = "1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-output = { path = "../cli-output", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } 
-solana-logger = { path = "../logger", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-output = { path = "../cli-output", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } + +tempfile = "3.1.0" +tokio = { version = "1", features = ["full"] } + evm-state = { path = "../evm-utils/evm-state" } evm-rpc = { path = "../evm-utils/evm-rpc" } -tempfile = "3.1.0" -tokio = { version = "0.2.22", features = ["full"] } [dev-dependencies] assert_cmd = "1.0" diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 6ed69ff70e..f44a32eb12 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -534,7 +534,21 @@ impl BigTableSubCommand for App<'_, '_> { } pub fn bigtable_process_command(ledger_path: &Path, matches: 
&ArgMatches<'_>) { - let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let runtime = tokio::runtime::Runtime::new().unwrap(); + + let verbose = matches.is_present("verbose"); + let output_format = matches + .value_of("output_format") + .map(|value| match value { + "json" => OutputFormat::Json, + "json-compact" => OutputFormat::JsonCompact, + _ => unreachable!(), + }) + .unwrap_or(if verbose { + OutputFormat::DisplayVerbose + } else { + OutputFormat::Display + }); let verbose = matches.is_present("verbose"); let output_format = matches diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index b2bfcad5ea..a474af9671 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -31,7 +31,7 @@ use solana_runtime::{ snapshot_utils::SnapshotVersion, }; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, clock::{Epoch, Slot}, feature::{self, Feature}, feature_set, @@ -51,7 +51,6 @@ use solana_vote_program::{ }; use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - convert::TryInto, ffi::OsStr, fs::{self, File}, io::{self, stdout, BufRead, BufReader, Write}, @@ -79,12 +78,27 @@ fn output_slot_rewards(blockstore: &Blockstore, slot: Slot, method: &LedgerOutpu if let Ok(Some(rewards)) = blockstore.read_rewards(slot) { if !rewards.is_empty() { println!(" Rewards:"); + println!( + " {:<44} {:^15} {:<15} {:<20}", + "Address", "Type", "Amount", "New Balance" + ); + for reward in rewards { + let sign = if reward.lamports < 0 { "-" } else { "" }; println!( - " Account {}: {}{} VLX", + " {:<44} {:^15} {:<15} {} VLX", reward.pubkey, - if reward.lamports < 0 { '-' } else { ' ' }, - lamports_to_sol(reward.lamports.abs().try_into().unwrap()) + if let Some(reward_type) = reward.reward_type { + format!("{}", reward_type) + } else { + "-".to_string() + }, + format!( + "{}◎{:<14.9}", + sign, + lamports_to_sol(reward.lamports.abs() as u64) + ), + format!("◎{:<18.9}", lamports_to_sol(reward.post_balance)) ); } } @@ 
-713,6 +727,7 @@ fn load_bank_forks( snapshot_config.as_ref(), process_options, None, + None, ) } @@ -739,6 +754,7 @@ fn main() { } const DEFAULT_ROOT_COUNT: &str = "1"; + const DEFAULT_MAX_SLOTS_ROOT_REPAIR: &str = "2000"; solana_logger::setup_with_default("solana=info"); let starting_slot_arg = Arg::with_name("starting_slot") @@ -756,10 +772,10 @@ fn main() { .long("no-snapshot") .takes_value(false) .help("Do not start from a local snapshot if present"); - let bpf_jit_arg = Arg::with_name("bpf_jit") - .long("bpf-jit") + let no_bpf_jit_arg = Arg::with_name("no_bpf_jit") + .long("no-bpf-jit") .takes_value(false) - .help("Process with JIT instead of interpreter"); + .help("Disable the just-in-time compiler and instead use the interpreter for BP"); let no_accounts_db_caching_arg = Arg::with_name("no_accounts_db_caching") .long("no-accounts-db-caching") .takes_value(false) @@ -933,6 +949,11 @@ fn main() { .arg(&starting_slot_arg) .about("Print all the dead slots in the ledger") ) + .subcommand( + SubCommand::with_name("duplicate-slots") + .arg(&starting_slot_arg) + .about("Print all the duplicate slots in the ledger") + ) .subcommand( SubCommand::with_name("set-dead-slot") .about("Mark one or more slots dead") @@ -1041,7 +1062,7 @@ fn main() { .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) .arg(&no_accounts_db_caching_arg) - .arg(&bpf_jit_arg) + .arg(&no_bpf_jit_arg) .arg(&allow_dead_slots_arg) .arg(&max_genesis_archive_unpacked_size_arg) .arg( @@ -1313,7 +1334,7 @@ fn main() { .long("dead-slots-only") .required(false) .takes_value(false) - .help("Limit puring to dead slots only") + .help("Limit purging to dead slots only") ) ) .subcommand( @@ -1353,6 +1374,34 @@ fn main() { .help("Number of roots in the output"), ) ) + .subcommand( + SubCommand::with_name("repair-roots") + .about("Traverses the AncestorIterator backward from a last known root \ + to restore missing roots to the Root column") + .arg( + Arg::with_name("start_root") + .long("before") + .value_name("NUM") 
+ .takes_value(true) + .help("First good root after the range to repair") + ) + .arg( + Arg::with_name("end_root") + .long("until") + .value_name("NUM") + .takes_value(true) + .help("Last slot to check for root repair") + ) + .arg( + Arg::with_name("max_slots") + .long("repair-limit") + .value_name("NUM") + .takes_value(true) + .default_value(DEFAULT_MAX_SLOTS_ROOT_REPAIR) + .required(true) + .help("Override the maximum number of slots to check for root repair") + ) + ) .subcommand( SubCommand::with_name("analyze-storage") .about("Output statistics in JSON format about \ @@ -1627,6 +1676,17 @@ fn main() { println!("{}", slot); } } + ("duplicate-slots", Some(arg_matches)) => { + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { + println!("{}", slot); + } + } ("set-dead-slot", Some(arg_matches)) => { let slots = values_t_or_exit!(arg_matches, "slots", Slot); let blockstore = @@ -1666,35 +1726,31 @@ fn main() { let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); let f = BufReader::new(File::open(log_file).unwrap()); println!("Reading log file"); - for line in f.lines() { - if let Ok(line) = line { - let parse_results = { - if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { - Some((slot_string, &mut frozen)) - } else { - full_regex - .captures_iter(&line) - .next() - .map(|slot_string| (slot_string, &mut full)) - } - }; + for line in f.lines().flatten() { + let parse_results = { + if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { + Some((slot_string, &mut frozen)) + } else { + full_regex + .captures_iter(&line) + .next() + .map(|slot_string| (slot_string, &mut full)) + } + }; - if let Some((slot_string, map)) = parse_results { - let slot = slot_string - .get(1) - .expect("Only one match 
group") - .as_str() - .parse::() - .unwrap(); - if ancestors.contains(&slot) && !map.contains_key(&slot) { - map.insert(slot, line); - } - if slot == ending_slot - && frozen.contains_key(&slot) - && full.contains_key(&slot) - { - break; - } + if let Some((slot_string, map)) = parse_results { + let slot = slot_string + .get(1) + .expect("Only one match group") + .as_str() + .parse::() + .unwrap(); + if ancestors.contains(&slot) && !map.contains_key(&slot) { + map.insert(slot, line); + } + if slot == ending_slot && frozen.contains_key(&slot) && full.contains_key(&slot) + { + break; } } } @@ -1712,7 +1768,7 @@ fn main() { dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), new_hard_forks: hardforks_of(arg_matches, "hard_forks"), poh_verify: !arg_matches.is_present("skip_poh_verify"), - bpf_jit: arg_matches.is_present("bpf_jit"), + bpf_jit: !matches.is_present("no_bpf_jit"), accounts_db_caching_enabled: !arg_matches.is_present("no_accounts_db_caching"), allow_dead_slots: arg_matches.is_present("allow_dead_slots"), ..ProcessOptions::default() @@ -1882,8 +1938,7 @@ fn main() { || remove_stake_accounts || !accounts_to_remove.is_empty() || faucet_pubkey.is_some() - || bootstrap_validator_pubkeys.is_some() - || warp_time; + || bootstrap_validator_pubkeys.is_some(); if child_bank_required { let mut child_bank = @@ -1906,7 +1961,7 @@ fn main() { if let Some(faucet_pubkey) = faucet_pubkey { bank.store_account( &faucet_pubkey, - &Account::new(faucet_lamports, 0, &system_program::id()), + &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), ); } @@ -1966,7 +2021,7 @@ fn main() { bank.store_account( identity_pubkey, - &Account::new( + &AccountSharedData::new( bootstrap_validator_lamports, 0, &system_program::id(), @@ -2017,36 +2072,6 @@ fn main() { } } - if warp_time { - // warp to end of epoch, to ensure feature will be activated at next block - let epoch_start_slot = - genesis_config.epoch_schedule.get_first_slot_in_epoch( - 
genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 1, - ); - - info!("Warping time at slot = {}", epoch_start_slot); - let feature = Feature { - activated_at: Some(epoch_start_slot), - }; - bank.store_account( - &feature_set::warp_timestamp_again::id(), - &feature::create_account(&feature, sol_to_lamports(1.)), - ); - - if let Some(warp_slot) = warp_slot { - if warp_slot != epoch_start_slot - 1 { - eprintln!( - "Error: --warp-slot should warp to epoch start. Must be == {}", - epoch_start_slot - 1 - ); - exit(1); - } - } else { - warn!("Warping to slot {}", epoch_start_slot - 1); - warp_slot = Some(epoch_start_slot - 1); - } - } - if child_bank_required { while !bank.is_complete() { bank.register_tick(&Hash::new_unique()); @@ -2145,7 +2170,7 @@ fn main() { println!("---"); for (pubkey, (account, slot)) in accounts.into_iter() { - let data_len = account.data.len(); + let data_len = account.data().len(); println!("{}:", pubkey); println!(" - balance: {} VLX", lamports_to_sol(account.lamports)); println!(" - owner: '{}'", account.owner); @@ -2153,7 +2178,7 @@ fn main() { println!(" - slot: {}", slot); println!(" - rent_epoch: {}", account.rent_epoch); if !exclude_account_data { - println!(" - data: '{}'", bs58::encode(account.data).into_string()); + println!(" - data: '{}'", bs58::encode(account.data()).into_string()); } println!(" - data_len: {}", data_len); } @@ -2289,7 +2314,7 @@ fn main() { // capitalizaion, which doesn't affect inflation behavior! base_bank.store_account( &feature_set::secp256k1_program_enabled::id(), - &Account::default(), + &AccountSharedData::default(), ); force_enabled_count -= 1; } else { @@ -2306,7 +2331,7 @@ fn main() { // capitalizaion, which doesn't affect inflation behavior! 
base_bank.store_account( &feature_set::instructions_sysvar_enabled::id(), - &Account::default(), + &AccountSharedData::default(), ); force_enabled_count -= 1; } else { @@ -2603,7 +2628,7 @@ fn main() { owner: format!("{}", base_account.owner), old_balance: base_account.lamports, new_balance: warped_account.lamports, - data_size: base_account.data.len(), + data_size: base_account.data().len(), delegation: format_or_na(detail.map(|d| d.voter)), delegation_owner: format_or_na( detail.map(|d| d.voter_owner), @@ -2686,6 +2711,8 @@ fn main() { } assert_capitalization(&bank); + println!("Inflation: {:?}", bank.inflation()); + println!("RentCollector: {:?}", bank.rent_collector()); println!("Capitalization: {}", Sol(bank.capitalization())); } } @@ -2842,6 +2869,56 @@ fn main() { } }); } + ("repair-roots", Some(arg_matches)) => { + let blockstore = open_blockstore( + &ledger_path, + AccessType::TryPrimaryThenSecondary, + wal_recovery_mode, + ); + let start_root = if let Some(root) = arg_matches.value_of("start_root") { + Slot::from_str(root).expect("Before root must be a number") + } else { + blockstore.max_root() + }; + let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64); + let end_root = if let Some(root) = arg_matches.value_of("end_root") { + Slot::from_str(root).expect("Until root must be a number") + } else { + start_root.saturating_sub(max_slots) + }; + assert!(start_root > end_root); + assert!(blockstore.is_root(start_root)); + let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked + if arg_matches.is_present("end_root") && num_slots > max_slots { + eprintln!( + "Requested range {} too large, max {}. 
\ + Either adjust `--until` value, or pass a larger `--repair-limit` \ + to override the limit", + num_slots, max_slots, + ); + exit(1); + } + let ancestor_iterator = + AncestorIterator::new(start_root, &blockstore).take_while(|&slot| slot >= end_root); + let roots_to_fix: Vec<_> = ancestor_iterator + .filter(|slot| !blockstore.is_root(*slot)) + .collect(); + if !roots_to_fix.is_empty() { + eprintln!("{} slots to be rooted", roots_to_fix.len()); + for chunk in roots_to_fix.chunks(100) { + eprintln!("{:?}", chunk); + blockstore.set_roots(&roots_to_fix).unwrap_or_else(|err| { + eprintln!("Unable to set roots {:?}: {}", roots_to_fix, err); + exit(1); + }); + } + } else { + println!( + "No missing roots found in range {} to {}", + end_root, start_root + ); + } + } ("bounds", Some(arg_matches)) => { let blockstore = open_blockstore( &ledger_path, diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 2b82aeff61..b5894a66bb 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-ledger" -version = "1.5.19" +version = "1.6.14" description = "Solana ledger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,9 +10,6 @@ documentation = "https://docs.rs/solana-ledger" edition = "2018" [dependencies] -# Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts when also using the bzip2 crate -rocksdb = { git = "https://github.com/rust-rocksdb/rust-rocksdb", rev = "39b877b", default-features = false, features = ["lz4"] } - bincode = "1.3.1" byteorder = "1.3.4" chrono = { version = "0.4.11", features = ["serde"] } @@ -20,7 +17,7 @@ chrono-humanize = "0.1.1" crossbeam-channel = "0.4" dlopen_derive = "0.1.4" dlopen = "0.1.8" -ed25519-dalek = "1.0.0-pre.4" +ed25519-dalek = "1.0.1" fs_extra = "1.2.0" futures = "0.3.8" futures-util = "0.3.5" @@ -29,42 +26,49 @@ lazy_static = "1.4.0" libc = "0.2.81" log = { version = "0.4.11" } num_cpus = "1.13.0" -prost = "0.6.1" +prost = "0.7.0" 
rand = "0.7.0" rand_chacha = "0.2.2" -rayon = "1.4.1" +rayon = "1.5.0" reed-solomon-erasure = { version = "4.0.2", features = ["simd-accel"] } -serde = "1.0.118" +serde = "1.0.122" serde_bytes = "0.11.4" sha2 = "0.9.2" -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.5.19" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-perf = { path = "../perf", version = "=1.5.19" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.5.19" } -solana-storage-proto = { path = "../storage-proto", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.6.14" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-perf = { path = "../perf", version = "=1.6.14" } 
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.6.14" } +solana-storage-proto = { path = "../storage-proto", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } tempfile = "3.1.0" thiserror = "1.0" -tokio = { version = "0.2.22", features = ["full"] } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" trees = "0.2.1" evm-state = { path = "../evm-utils/evm-state" } +[dependencies.rocksdb] +# Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts +# when also using the bzip2 crate +version = "0.16.0" +default-features = false +features = ["lz4"] [dev-dependencies] assert_matches = "1.3.0" matches = "0.1.6" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-budget-program = { path = "../programs/budget", version = "=1.5.19" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-budget-program = { path = "../programs/budget", version = "=1.6.14" } [build-dependencies] rustc_version = "0.2" diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 4f01f78713..a828cde304 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -1,8 +1,8 @@ use crate::{ blockstore::Blockstore, blockstore_processor::{ - self, BlockstoreProcessorError, BlockstoreProcessorResult, ProcessOptions, - TransactionStatusSender, + self, BlockstoreProcessorError, BlockstoreProcessorResult, CacheBlockMetaSender, + ProcessOptions, TransactionStatusSender, }, entry::VerifyRecyclers, leader_schedule_cache::LeaderScheduleCache, @@ -38,7 +38,8 @@ pub fn load( shrink_paths: Option>, snapshot_config: 
Option<&SnapshotConfig>, process_options: ProcessOptions, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) -> LoadResult { if let Some(snapshot_config) = snapshot_config.as_ref() { info!( @@ -99,6 +100,7 @@ pub fn load( &process_options, &VerifyRecyclers::default(), transaction_status_sender, + cache_block_meta_sender, ), Some(deserialized_snapshot_hash), ); @@ -118,6 +120,7 @@ pub fn load( evm_genesis_path, account_paths, process_options, + cache_block_meta_sender, ), None, ) diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index 57e2a0b6d6..a7de767409 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -11,7 +11,7 @@ use std::{ }, time::Duration, }; -use tokio::time::delay_for; +use tokio::time::sleep; // Attempt to upload this many blocks in parallel const NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL: usize = 32; @@ -82,7 +82,7 @@ pub async fn upload_confirmed_blocks( Err(err) => { error!("get_confirmed_blocks for {} failed: {:?}", start_slot, err); // Consider exponential backoff... - delay_for(Duration::from_secs(2)).await; + tokio::time::sleep(Duration::from_secs(2)).await; } } }; @@ -169,7 +169,7 @@ pub async fn upload_confirmed_blocks( use futures::stream::StreamExt; let mut stream = - tokio::stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL); + tokio_stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL); while let Some(blocks) = stream.next().await { if exit.load(Ordering::Relaxed) { @@ -293,7 +293,7 @@ pub async fn upload_evm_confirmed_blocks( start_block, err ); // Consider exponential backoff... 
- delay_for(Duration::from_secs(2)).await; + sleep(Duration::from_secs(2)).await; } } }; @@ -381,7 +381,7 @@ pub async fn upload_evm_confirmed_blocks( use futures::stream::StreamExt; let mut stream = - tokio::stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL); + tokio_stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL); while let Some(blocks) = stream.next().await { if exit.load(Ordering::Relaxed) { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 22a48bd131..e7b414883f 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -57,8 +57,9 @@ use std::{ path::{Path, PathBuf}, rc::Rc, sync::{ + atomic::{AtomicBool, Ordering}, mpsc::{sync_channel, Receiver, SyncSender, TrySendError}, - Arc, Mutex, RwLock, + Arc, Mutex, RwLock, RwLockWriteGuard, }, }; use tempfile::TempDir; @@ -97,6 +98,7 @@ type CompletedRanges = Vec<(u32, u32)>; pub enum PurgeType { Exact, PrimaryIndex, + CompactionFilter, } #[derive(Error, Debug)] @@ -145,17 +147,18 @@ pub struct Blockstore { blocktime_cf: LedgerColumn, perf_samples_cf: LedgerColumn, - // evm - evm_blocks_cf: LedgerColumn, - evm_transactions_cf: LedgerColumn, - evm_blocks_by_hash_cf: LedgerColumn, - + block_height_cf: LedgerColumn, last_root: Arc>, insert_shreds_lock: Arc>, pub new_shreds_signals: Vec>, pub completed_slots_senders: Vec>>, - pub lowest_cleanup_slot: Arc>, + pub lowest_cleanup_slot: Arc>, no_compaction: bool, + + // EVM scope + evm_blocks_cf: LedgerColumn, + evm_transactions_cf: LedgerColumn, + evm_blocks_by_hash_cf: LedgerColumn, } pub struct IndexMetaWorkingSetEntry { @@ -320,9 +323,9 @@ impl Blockstore { let rewards_cf = db.column(); let blocktime_cf = db.column(); let perf_samples_cf = db.column(); + let block_height_cf = db.column(); let evm_blocks_cf = db.column(); - let evm_transactions_cf = db.column(); let evm_blocks_by_hash_cf = db.column(); @@ -372,15 +375,16 @@ impl Blockstore { rewards_cf, blocktime_cf, perf_samples_cf, - 
evm_blocks_cf, - evm_transactions_cf, - evm_blocks_by_hash_cf, + block_height_cf, new_shreds_signals: vec![], completed_slots_senders: vec![], insert_shreds_lock: Arc::new(Mutex::new(())), last_root, lowest_cleanup_slot: Arc::new(RwLock::new(0)), no_compaction: false, + evm_blocks_cf, + evm_transactions_cf, + evm_blocks_by_hash_cf, }; if initialize_transaction_status_index { blockstore.initialize_transaction_status_index()?; @@ -593,8 +597,7 @@ impl Blockstore { prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, code_cf: &LedgerColumn, ) { - (erasure_meta.first_coding_index - ..erasure_meta.first_coding_index + erasure_meta.config.num_coding() as u64) + (erasure_meta.set_index..erasure_meta.set_index + erasure_meta.config.num_coding() as u64) .for_each(|i| { if let Some(shred) = prev_inserted_codes .remove(&(slot, i)) @@ -665,7 +668,6 @@ impl Blockstore { erasure_meta.config.num_data(), erasure_meta.config.num_coding(), set_index as usize, - erasure_meta.first_coding_index as usize, slot, ) { Self::submit_metrics( @@ -1089,12 +1091,10 @@ impl Blockstore { ); let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| { - let first_coding_index = - u64::from(shred.index()) - u64::from(shred.coding_header.position); self.erasure_meta_cf .get((slot, set_index)) .expect("Expect database get to succeed") - .unwrap_or_else(|| ErasureMeta::new(set_index, first_coding_index, &erasure_config)) + .unwrap_or_else(|| ErasureMeta::new(set_index, erasure_config)) }); if erasure_config != erasure_meta.config { @@ -1148,10 +1148,10 @@ impl Blockstore { ) -> Option> { // Search for the shred which set the initial erasure config, either inserted, // or in the current batch in just_received_coding_shreds. 
- let coding_start = erasure_meta.first_coding_index; - let coding_end = coding_start + erasure_meta.config.num_coding() as u64; + let coding_indices = erasure_meta.set_index + ..erasure_meta.set_index + erasure_meta.config.num_coding() as u64; let mut conflicting_shred = None; - for coding_index in coding_start..coding_end { + for coding_index in coding_indices { let maybe_shred = self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); @@ -1462,7 +1462,16 @@ impl Blockstore { } pub fn get_data_shred(&self, slot: Slot, index: u64) -> Result>> { - self.data_shred_cf.get_bytes((slot, index)) + use crate::shred::SHRED_PAYLOAD_SIZE; + self.data_shred_cf.get_bytes((slot, index)).map(|data| { + data.map(|mut d| { + // For forward compatibility, pad the payload out to + // SHRED_PAYLOAD_SIZE incase the shred was inserted + // with any padding stripped off. + d.resize(cmp::max(d.len(), SHRED_PAYLOAD_SIZE), 0); + d + }) + }) } pub fn get_data_shreds_for_slot( @@ -1538,7 +1547,7 @@ impl Blockstore { // Only used by tests #[allow(clippy::too_many_arguments)] - pub fn write_entries( + pub(crate) fn write_entries( &self, start_slot: Slot, num_ticks_in_start_slot: u64, @@ -1549,7 +1558,7 @@ impl Blockstore { keypair: &Arc, entries: Vec, version: u16, - ) -> Result { + ) -> Result { let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v); let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0 assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot); @@ -1557,8 +1566,7 @@ impl Blockstore { let mut current_slot = start_slot; let mut shredder = - Shredder::new(current_slot, parent_slot, 0.0, keypair.clone(), 0, version) - .expect("Failed to create entry shredder"); + Shredder::new(current_slot, parent_slot, keypair.clone(), 0, version).unwrap(); let mut all_shreds = vec![]; let mut slot_entries = vec![]; // Find all the 
entries for start_slot @@ -1583,12 +1591,11 @@ impl Blockstore { shredder = Shredder::new( current_slot, parent_slot, - 0.0, keypair.clone(), (ticks_per_slot - remaining_ticks_in_slot) as u8, version, ) - .expect("Failed to create entry shredder"); + .unwrap(); } if entry.is_tick() { @@ -1603,10 +1610,9 @@ impl Blockstore { all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); } - - let num_shreds = all_shreds.len(); + let num_data = all_shreds.iter().filter(|shred| shred.is_data()).count(); self.insert_shreds(all_shreds, None, false)?; - Ok(num_shreds) + Ok(num_data) } pub fn get_index(&self, slot: Slot) -> Result> { @@ -1739,9 +1745,6 @@ impl Blockstore { } pub fn cache_block_time(&self, slot: Slot, timestamp: UnixTimestamp) -> Result<()> { - if !self.is_root(slot) { - return Err(BlockstoreError::SlotNotRooted); - } self.blocktime_cf.put(slot, ×tamp) } @@ -1779,6 +1782,24 @@ impl Blockstore { })) } + pub fn get_block_height(&self, slot: Slot) -> Result> { + datapoint_info!( + "blockstore-rpc-api", + ("method", "get_block_height".to_string(), String) + ); + let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); + // lowest_cleanup_slot is the last slot that was not cleaned up by + // LedgerCleanupService + if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot { + return Err(BlockstoreError::SlotCleanedUp); + } + self.block_height_cf.get(slot) + } + + pub fn cache_block_height(&self, slot: Slot, block_height: u64) -> Result<()> { + self.block_height_cf.put(slot, &block_height) + } + pub fn get_first_available_evm_block(&self) -> Result { Ok(self .evm_blocks_cf @@ -1796,28 +1817,6 @@ impl Blockstore { .next()) } - pub fn get_confirmed_block_hash(&self, slot: Slot) -> Result { - datapoint_info!( - "blockstore-rpc-api", - ("method", "get_confirmed_block_hash".to_string(), String) - ); - let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); - // lowest_cleanup_slot is the last slot that was not cleaned up by - // 
LedgerCleanupService - if *lowest_cleanup_slot > 0 && *lowest_cleanup_slot >= slot { - return Err(BlockstoreError::SlotCleanedUp); - } - if self.is_root(slot) { - let slot_entries = self.get_slot_entries(slot, 0)?; - if !slot_entries.is_empty() { - let blockhash = get_last_hash(slot_entries.iter()) - .unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot)); - return Ok(blockhash.to_string()); - } - } - Err(BlockstoreError::SlotNotRooted) - } - pub fn get_rooted_block( &self, slot: Slot, @@ -1890,7 +1889,12 @@ impl Blockstore { .get_protobuf_or_bincode::(slot)? .unwrap_or_default() .into(); + + // The Blocktime and BlockHeight column families are updated asynchronously; they + // may not be written by the time the complete slot entries are available. In this + // case, these fields will be `None`. let block_time = self.blocktime_cf.get(slot)?; + let block_height = self.block_height_cf.get(slot)?; let block = ConfirmedBlock { previous_blockhash: previous_blockhash.to_string(), @@ -1900,6 +1904,7 @@ impl Blockstore { .map_transactions_to_statuses(slot, slot_transaction_iterator), rewards, block_time, + block_height, }; return Ok(block); } @@ -1907,7 +1912,7 @@ impl Blockstore { Err(BlockstoreError::SlotUnavailable) } - /// Returns block, and flag if that block was rooted (confirmed) + /// Returns EVM block, and flag if that block was rooted (confirmed) pub fn get_evm_block(&self, block_number: evm::BlockNum) -> Result<(evm::Block, bool)> { datapoint_info!( "blockstore-rpc-api", @@ -2024,18 +2029,24 @@ impl Blockstore { batch.put::(0, &index0)?; Ok(None) } else { - let result = if index0.frozen && to_slot > index0.max_slot { - debug!("Pruning transaction index 0 at slot {}", index0.max_slot); + let purge_target_primary_index = if index0.frozen && to_slot > index0.max_slot { + info!( + "Pruning expired primary index 0 up to slot {} (max requested: {})", + index0.max_slot, to_slot + ); Some(0) } else if index1.frozen && to_slot > index1.max_slot { - 
debug!("Pruning transaction index 1 at slot {}", index1.max_slot); + info!( + "Pruning expired primary index 1 up to slot {} (max requested: {})", + index1.max_slot, to_slot + ); Some(1) } else { None }; - if result.is_some() { - *w_active_transaction_status_index = if index0.frozen { 0 } else { 1 }; + if let Some(purge_target_primary_index) = purge_target_primary_index { + *w_active_transaction_status_index = purge_target_primary_index; if index0.frozen { index0.max_slot = 0 }; @@ -2048,16 +2059,17 @@ impl Blockstore { batch.put::(1, &index1)?; } - Ok(result) + Ok(purge_target_primary_index) } } - fn get_primary_index( + fn get_primary_index_to_write( &self, slot: Slot, - w_active_transaction_status_index: &mut u64, + // take WriteGuard to require critical section semantics at call site + w_active_transaction_status_index: &RwLockWriteGuard, ) -> Result { - let i = *w_active_transaction_status_index; + let i = **w_active_transaction_status_index; let mut index_meta = self.transaction_status_index_cf.get(i)?.unwrap(); if slot > index_meta.max_slot { assert!(!index_meta.frozen); @@ -2096,9 +2108,10 @@ impl Blockstore { let status = status.into(); // This write lock prevents interleaving issues with the transaction_status_index_cf by gating // writes to that column - let mut w_active_transaction_status_index = + let w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); - let primary_index = self.get_primary_index(slot, &mut w_active_transaction_status_index)?; + let primary_index = + self.get_primary_index_to_write(slot, &w_active_transaction_status_index)?; self.transaction_status_cf .put_protobuf((primary_index, signature, slot), &status)?; for address in writable_keys { @@ -2116,6 +2129,21 @@ impl Blockstore { Ok(()) } + fn ensure_lowest_cleanup_slot(&self) -> (std::sync::RwLockReadGuard, Slot) { + // Ensures consistent result by using lowest_cleanup_slot as the lower bound + // for reading columns that do not employ strong 
read consistency with slot-based + // delete_range + let lowest_cleanup_slot = self.lowest_cleanup_slot.read().unwrap(); + let lowest_available_slot = (*lowest_cleanup_slot) + .checked_add(1) + .expect("overflow from trusted value"); + + // Make caller hold this lock properly; otherwise LedgerCleanupService can purge/compact + // needed slots here at any given moment. + // Blockstore callers, like rpc, can process concurrent read queries + (lowest_cleanup_slot, lowest_available_slot) + } + // Returns a transaction status, as well as a loop counter for unit testing fn get_transaction_status_with_counter( &self, @@ -2123,9 +2151,15 @@ impl Blockstore { confirmed_unrooted_slots: &[Slot], ) -> Result<(Option<(Slot, TransactionStatusMeta)>, u64)> { let mut counter = 0; + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); + for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.transaction_status_cf.iter(IteratorMode::From( - (transaction_status_cf_primary_index, signature, 0), + ( + transaction_status_cf_primary_index, + signature, + lowest_available_slot, + ), IteratorDirection::Forward, ))?; for ((i, sig, slot), _data) in index_iterator { @@ -2144,6 +2178,8 @@ impl Blockstore { return Ok((status, counter)); } } + drop(lock); + Ok((None, counter)) } @@ -2267,13 +2303,15 @@ impl Blockstore { start_slot: Slot, end_slot: Slot, ) -> Result> { + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); + let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, - start_slot, + start_slot.max(lowest_available_slot), Signature::default(), ), IteratorDirection::Forward, @@ -2288,6 +2326,7 @@ impl Blockstore { } } } + drop(lock); signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } @@ -2300,13 +2339,14 @@ impl 
Blockstore { pubkey: Pubkey, slot: Slot, ) -> Result> { + let (lock, lowest_available_slot) = self.ensure_lowest_cleanup_slot(); let mut signatures: Vec<(Slot, Signature)> = vec![]; for transaction_status_cf_primary_index in 0..=1 { let index_iterator = self.address_signatures_cf.iter(IteratorMode::From( ( transaction_status_cf_primary_index, pubkey, - slot, + slot.max(lowest_available_slot), Signature::default(), ), IteratorDirection::Forward, @@ -2321,6 +2361,7 @@ impl Blockstore { signatures.push((slot, signature)); } } + drop(lock); signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))); Ok(signatures) } @@ -2680,7 +2721,8 @@ impl Blockstore { ) -> Result<()> { let mut w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); - let primary_index = self.get_primary_index(slot, &mut w_active_transaction_status_index)?; + let primary_index = + self.get_primary_index_to_write(slot, &mut w_active_transaction_status_index)?; self.evm_blocks_by_hash_cf .put_protobuf((primary_index, hash), &id) } @@ -2884,18 +2926,28 @@ impl Blockstore { let transaction_data = confirmed_transaction_data.or_else(|| transactions.first().map(|(_, data)| data)); - Ok(if let Some(data) = transaction_data { - Some( - self.evm_transactions_cf - .deserialize_protobuf_or_bincode::(&data)? - .try_into() - .map_err(|e| { - BlockstoreError::ProtobufDecodeError(prost::DecodeError::new(e)) - })?, - ) - } else { - None - }) + let transaction_data = match transaction_data { + Some(tx_data) => { + let tx_receipt = self + .evm_transactions_cf + .deserialize_protobuf_or_bincode::(&tx_data)?; + let tx_receipt: evm::TransactionReceipt = + tx_receipt.try_into().map_err(BlockstoreError::Other)?; + Some(tx_receipt) + } + None => None, + }; + + // let mb_tx = transaction_data + // .map(|data| { + // self.evm_transactions_cf + // .deserialize_protobuf_or_bincode::(&data) + // }) + // .transpose()? + // .map(|tx_receipt| tx_receipt.try_into()) + // . 
+ + Ok(transaction_data) } pub fn write_evm_transaction( @@ -2908,7 +2960,8 @@ impl Blockstore { // reuse mechanism of transaction_status_index_cf gating let mut w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); - let index = self.get_primary_index(block_num, &mut w_active_transaction_status_index)?; + let index = + self.get_primary_index_to_write(block_num, &mut w_active_transaction_status_index)?; let status = status.into(); self.evm_transactions_cf.put_protobuf( EvmTransactionReceiptsIndex { @@ -3275,6 +3328,13 @@ impl Blockstore { Ok(dead_slots_iterator.map(|(slot, _)| slot)) } + pub fn duplicate_slots_iterator(&self, slot: Slot) -> Result + '_> { + let duplicate_slots_iterator = self + .db + .iter::(IteratorMode::From(slot, IteratorDirection::Forward))?; + Ok(duplicate_slots_iterator.map(|(slot, _)| slot)) + } + pub fn last_root(&self) -> Slot { *self.last_root.read().unwrap() } @@ -3293,6 +3353,10 @@ impl Blockstore { self.last_root() } + pub fn lowest_cleanup_slot(&self) -> Slot { + *self.lowest_cleanup_slot.read().unwrap() + } + pub fn storage_size(&self) -> Result { self.db.storage_size() } @@ -3300,6 +3364,50 @@ impl Blockstore { pub fn is_primary_access(&self) -> bool { self.db.is_primary_access() } + + pub fn scan_and_fix_roots(&self, exit: &Arc) -> Result<()> { + let ancestor_iterator = AncestorIterator::new(self.last_root(), &self) + .take_while(|&slot| slot >= self.lowest_cleanup_slot()); + + let mut find_missing_roots = Measure::start("find_missing_roots"); + let mut roots_to_fix = vec![]; + for slot in ancestor_iterator.filter(|slot| !self.is_root(*slot)) { + if exit.load(Ordering::Relaxed) { + return Ok(()); + } + roots_to_fix.push(slot); + } + find_missing_roots.stop(); + let mut fix_roots = Measure::start("fix_roots"); + if !roots_to_fix.is_empty() { + info!("{} slots to be rooted", roots_to_fix.len()); + for chunk in roots_to_fix.chunks(100) { + if exit.load(Ordering::Relaxed) { + return Ok(()); + } + 
trace!("{:?}", chunk); + self.set_roots(&roots_to_fix)?; + } + } else { + debug!( + "No missing roots found in range {} to {}", + self.lowest_cleanup_slot(), + self.last_root() + ); + } + fix_roots.stop(); + datapoint_info!( + "blockstore-scan_and_fix_roots", + ( + "find_missing_roots_us", + find_missing_roots.as_us() as i64, + i64 + ), + ("num_roots_to_fix", roots_to_fix.len() as i64, i64), + ("fix_roots_us", fix_roots.as_us() as i64, i64), + ); + Ok(()) + } } // Update the `completed_data_indexes` with a new shred `new_shred_index`. If a @@ -3798,8 +3906,7 @@ pub fn create_new_ledger( let last_hash = entries.last().unwrap().hash; let version = solana_sdk::shred_version::version_from_hash(&last_hash); - let shredder = Shredder::new(0, 0, 0.0, Arc::new(Keypair::new()), 0, version) - .expect("Failed to create entry shredder"); + let shredder = Shredder::new(0, 0, Arc::new(Keypair::new()), 0, version).unwrap(); let shreds = shredder.entries_to_shreds(&entries, true, 0).0; assert!(shreds.last().unwrap().last_in_slot()); @@ -3982,10 +4089,10 @@ pub fn entries_to_test_shreds( is_full_slot: bool, version: u16, ) -> Vec { - let shredder = Shredder::new(slot, parent_slot, 0.0, Arc::new(Keypair::new()), 0, version) - .expect("Failed to create entry shredder"); - - shredder.entries_to_shreds(&entries, is_full_slot, 0).0 + Shredder::new(slot, parent_slot, Arc::new(Keypair::new()), 0, version) + .unwrap() + .entries_to_shreds(&entries, is_full_slot, 0) + .0 } // used for tests only @@ -5132,14 +5239,10 @@ pub mod tests { if slot % 3 == 0 { let shred0 = shreds_for_slot.remove(0); missing_shreds.push(shred0); - blockstore - .insert_shreds(shreds_for_slot, None, false) - .unwrap(); - } else { - blockstore - .insert_shreds(shreds_for_slot, None, false) - .unwrap(); } + blockstore + .insert_shreds(shreds_for_slot, None, false) + .unwrap(); } // Check metadata @@ -6303,6 +6406,7 @@ pub mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: 
Some(vec![]), + rewards: Some(vec![]), } .into(); ledger @@ -6318,6 +6422,7 @@ pub mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), } .into(); ledger @@ -6333,6 +6438,7 @@ pub mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), } .into(); ledger @@ -6350,6 +6456,7 @@ pub mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), }), } }) @@ -6377,6 +6484,7 @@ pub mod tests { previous_blockhash: Hash::default().to_string(), rewards: vec![], block_time: None, + block_height: None, }; assert_eq!(confirmed_block, expected_block); @@ -6390,6 +6498,7 @@ pub mod tests { previous_blockhash: blockhash.to_string(), rewards: vec![], block_time: None, + block_height: None, }; assert_eq!(confirmed_block, expected_block); @@ -6406,13 +6515,17 @@ pub mod tests { previous_blockhash: blockhash.to_string(), rewards: vec![], block_time: None, + block_height: None, }; assert_eq!(complete_block, expected_complete_block); - // Test block_time returns, if available + // Test block_time & block_height return, if available let timestamp = 1_576_183_541; ledger.blocktime_cf.put(slot + 1, ×tamp).unwrap(); expected_block.block_time = Some(timestamp); + let block_height = slot - 2; + ledger.block_height_cf.put(slot + 1, &block_height).unwrap(); + expected_block.block_height = Some(block_height); let confirmed_block = ledger.get_rooted_block(slot + 1, true).unwrap(); assert_eq!(confirmed_block, expected_block); @@ -6420,6 +6533,9 @@ pub mod tests { let timestamp = 1_576_183_542; ledger.blocktime_cf.put(slot + 2, ×tamp).unwrap(); expected_complete_block.block_time = Some(timestamp); + let block_height = slot - 1; + ledger.block_height_cf.put(slot + 2, &block_height).unwrap(); + expected_complete_block.block_height = Some(block_height); let complete_block = 
ledger.get_complete_block(slot + 2, true).unwrap(); assert_eq!(complete_block, expected_complete_block); @@ -6444,6 +6560,7 @@ pub mod tests { let log_messages_vec = vec![String::from("Test message\n")]; let pre_token_balances_vec = vec![]; let post_token_balances_vec = vec![]; + let rewards_vec = vec![]; // result not found assert!(transaction_status_cf @@ -6467,6 +6584,7 @@ pub mod tests { log_messages: Some(log_messages_vec.clone()), pre_token_balances: Some(pre_token_balances_vec.clone()), post_token_balances: Some(post_token_balances_vec.clone()), + rewards: Some(rewards_vec.clone()), } .into(); assert!(transaction_status_cf @@ -6483,6 +6601,7 @@ pub mod tests { log_messages, pre_token_balances, post_token_balances, + rewards, } = transaction_status_cf .get_protobuf_or_bincode::(( 0, @@ -6501,6 +6620,7 @@ pub mod tests { assert_eq!(log_messages.unwrap(), log_messages_vec); assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec); assert_eq!(post_token_balances.unwrap(), post_token_balances_vec); + assert_eq!(rewards.unwrap(), rewards_vec); // insert value let status = TransactionStatusMeta { @@ -6512,6 +6632,7 @@ pub mod tests { log_messages: Some(log_messages_vec.clone()), pre_token_balances: Some(pre_token_balances_vec.clone()), post_token_balances: Some(post_token_balances_vec.clone()), + rewards: Some(rewards_vec.clone()), } .into(); assert!(transaction_status_cf @@ -6528,6 +6649,7 @@ pub mod tests { log_messages, pre_token_balances, post_token_balances, + rewards, } = transaction_status_cf .get_protobuf_or_bincode::(( 0, @@ -6548,6 +6670,7 @@ pub mod tests { assert_eq!(log_messages.unwrap(), log_messages_vec); assert_eq!(pre_token_balances.unwrap(), pre_token_balances_vec); assert_eq!(post_token_balances.unwrap(), post_token_balances_vec); + assert_eq!(rewards.unwrap(), rewards_vec); } Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } @@ -6778,6 +6901,7 @@ pub mod tests { log_messages: Some(vec![]), 
pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), } .into(); @@ -6950,6 +7074,176 @@ pub mod tests { Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); } + fn do_test_lowest_cleanup_slot_and_special_cfs( + simulate_compaction: bool, + simulate_ledger_cleanup_service: bool, + ) { + solana_logger::setup(); + + let blockstore_path = get_tmp_ledger_path!(); + { + let blockstore = Blockstore::open(&blockstore_path).unwrap(); + // TransactionStatus column opens initialized with one entry at index 2 + let transaction_status_cf = blockstore.db.column::(); + + let pre_balances_vec = vec![1, 2, 3]; + let post_balances_vec = vec![3, 2, 1]; + let status = TransactionStatusMeta { + status: solana_sdk::transaction::Result::<()>::Ok(()), + fee: 42u64, + pre_balances: pre_balances_vec, + post_balances: post_balances_vec, + inner_instructions: Some(vec![]), + log_messages: Some(vec![]), + pre_token_balances: Some(vec![]), + post_token_balances: Some(vec![]), + rewards: Some(vec![]), + } + .into(); + + let signature1 = Signature::new(&[2u8; 64]); + let signature2 = Signature::new(&[3u8; 64]); + + // Insert rooted slots 0..=3 with no fork + let meta0 = SlotMeta::new(0, 0); + blockstore.meta_cf.put(0, &meta0).unwrap(); + let meta1 = SlotMeta::new(1, 0); + blockstore.meta_cf.put(1, &meta1).unwrap(); + let meta2 = SlotMeta::new(2, 1); + blockstore.meta_cf.put(2, &meta2).unwrap(); + let meta3 = SlotMeta::new(3, 2); + blockstore.meta_cf.put(3, &meta3).unwrap(); + + blockstore.set_roots(&[0, 1, 2, 3]).unwrap(); + + let lowest_cleanup_slot = 1; + let lowest_available_slot = lowest_cleanup_slot + 1; + + transaction_status_cf + .put_protobuf((0, signature1, lowest_cleanup_slot), &status) + .unwrap(); + + transaction_status_cf + .put_protobuf((0, signature2, lowest_available_slot), &status) + .unwrap(); + + let address0 = solana_sdk::pubkey::new_rand(); + let address1 = solana_sdk::pubkey::new_rand(); + 
blockstore + .write_transaction_status( + lowest_cleanup_slot, + signature1, + vec![&address0], + vec![], + TransactionStatusMeta::default(), + ) + .unwrap(); + blockstore + .write_transaction_status( + lowest_available_slot, + signature2, + vec![&address1], + vec![], + TransactionStatusMeta::default(), + ) + .unwrap(); + + let check_for_missing = || { + ( + blockstore + .get_transaction_status_with_counter(signature1, &[]) + .unwrap() + .0 + .is_none(), + blockstore + .find_address_signatures_for_slot(address0, lowest_cleanup_slot) + .unwrap() + .is_empty(), + blockstore + .find_address_signatures(address0, lowest_cleanup_slot, lowest_cleanup_slot) + .unwrap() + .is_empty(), + ) + }; + + let assert_existing_always = || { + let are_existing_always = ( + blockstore + .get_transaction_status_with_counter(signature2, &[]) + .unwrap() + .0 + .is_some(), + !blockstore + .find_address_signatures_for_slot(address1, lowest_available_slot) + .unwrap() + .is_empty(), + !blockstore + .find_address_signatures( + address1, + lowest_available_slot, + lowest_available_slot, + ) + .unwrap() + .is_empty(), + ); + assert_eq!(are_existing_always, (true, true, true)); + }; + + let are_missing = check_for_missing(); + // should never be missing before the conditional compaction & simulation... + assert_eq!(are_missing, (false, false, false)); + assert_existing_always(); + + if simulate_compaction { + blockstore.set_max_expired_slot(lowest_cleanup_slot); + // force compaction filters to run across whole key range. + blockstore + .compact_storage(Slot::min_value(), Slot::max_value()) + .unwrap(); + } + + if simulate_ledger_cleanup_service { + *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; + } + + let are_missing = check_for_missing(); + if simulate_compaction || simulate_ledger_cleanup_service { + // ... 
when either simulation (or both) is effective, we should observe to be missing + // consistently + assert_eq!(are_missing, (true, true, true)); + } else { + // ... otherwise, we should observe to be existing... + assert_eq!(are_missing, (false, false, false)); + } + assert_existing_always(); + } + Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction"); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_with_compact_with_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(true, true); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_with_compact_without_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(true, false); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_without_compact_with_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(false, true); + } + + #[test] + fn test_lowest_cleanup_slot_and_special_cfs_without_compact_without_ledger_cleanup_service_simulation( + ) { + do_test_lowest_cleanup_slot_and_special_cfs(false, false); + } + #[test] fn test_get_rooted_transaction() { let slot = 2; @@ -6979,6 +7273,7 @@ pub mod tests { let log_messages = Some(vec![String::from("Test message\n")]); let pre_token_balances = Some(vec![]); let post_token_balances = Some(vec![]); + let rewards = Some(vec![]); let signature = transaction.signatures[0]; let status = TransactionStatusMeta { status: Ok(()), @@ -6989,6 +7284,7 @@ pub mod tests { log_messages: log_messages.clone(), pre_token_balances: pre_token_balances.clone(), post_token_balances: post_token_balances.clone(), + rewards: rewards.clone(), } .into(); blockstore @@ -7006,6 +7302,7 @@ pub mod tests { log_messages, pre_token_balances, post_token_balances, + rewards, }), } }) @@ -7075,6 +7372,7 @@ pub mod tests { let log_messages = Some(vec![String::from("Test message\n")]); let pre_token_balances = Some(vec![]); let post_token_balances = 
Some(vec![]); + let rewards = Some(vec![]); let signature = transaction.signatures[0]; let status = TransactionStatusMeta { status: Ok(()), @@ -7085,6 +7383,7 @@ pub mod tests { log_messages: log_messages.clone(), pre_token_balances: pre_token_balances.clone(), post_token_balances: post_token_balances.clone(), + rewards: rewards.clone(), } .into(); blockstore @@ -7102,6 +7401,7 @@ pub mod tests { log_messages, pre_token_balances, post_token_balances, + rewards, }), } }) @@ -7829,6 +8129,7 @@ pub mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), } .into(); transaction_status_cf @@ -7908,7 +8209,7 @@ pub mod tests { fn test_recovery() { let slot = 1; let (data_shreds, coding_shreds, leader_schedule_cache) = - setup_erasure_shreds(slot, 0, 100, 1.0); + setup_erasure_shreds(slot, 0, 100); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); @@ -7941,7 +8242,7 @@ pub mod tests { let slot = 1; let num_entries = 100; let (data_shreds, coding_shreds, leader_schedule_cache) = - setup_erasure_shreds(slot, 0, num_entries, 1.0); + setup_erasure_shreds(slot, 0, num_entries); assert!(data_shreds.len() > 3); assert!(coding_shreds.len() > 3); let blockstore_path = get_tmp_ledger_path!(); @@ -8078,19 +8379,10 @@ pub mod tests { slot: u64, parent_slot: u64, num_entries: u64, - erasure_rate: f32, ) -> (Vec, Vec, Arc) { let entries = make_slot_entries_with_transactions(num_entries); let leader_keypair = Arc::new(Keypair::new()); - let shredder = Shredder::new( - slot, - parent_slot, - erasure_rate, - leader_keypair.clone(), - 0, - 0, - ) - .expect("Failed in creating shredder"); + let shredder = Shredder::new(slot, parent_slot, leader_keypair.clone(), 0, 0).unwrap(); let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0); let genesis_config = create_genesis_config(2).genesis_config; @@ -8142,8 +8434,7 @@ pub mod tests 
{ let entries1 = make_slot_entries_with_transactions(1); let entries2 = make_slot_entries_with_transactions(1); let leader_keypair = Arc::new(Keypair::new()); - let shredder = - Shredder::new(slot, 0, 1.0, leader_keypair, 0, 0).expect("Failed in creating shredder"); + let shredder = Shredder::new(slot, 0, leader_keypair, 0, 0).unwrap(); let (shreds, _, _) = shredder.entries_to_shreds(&entries1, true, 0); let (duplicate_shreds, _, _) = shredder.entries_to_shreds(&entries2, true, 0); let shred = shreds[0].clone(); @@ -8373,6 +8664,12 @@ pub mod tests { ui_amount_string: "1.1".to_string(), }, }]), + rewards: Some(vec![Reward { + pubkey: "My11111111111111111111111111111111111111111".to_string(), + lamports: -42, + post_balance: 42, + reward_type: Some(RewardType::Rent), + }]), }; let deprecated_status: StoredTransactionStatusMeta = status.clone().into(); let protobuf_status: generated::TransactionStatusMeta = status.into(); @@ -8454,8 +8751,8 @@ pub mod tests { let ledger_path = get_tmp_ledger_path!(); let ledger = Blockstore::open(&ledger_path).unwrap(); - let coding1 = Shredder::generate_coding_shreds(0.5f32, &shreds, usize::MAX); - let coding2 = Shredder::generate_coding_shreds(1.0f32, &shreds, usize::MAX); + let coding1 = Shredder::generate_coding_shreds(&shreds, false); + let coding2 = Shredder::generate_coding_shreds(&shreds, true); for shred in &shreds { info!("shred {:?}", shred); } @@ -8479,7 +8776,7 @@ pub mod tests { solana_logger::setup(); let slot = 1; let (_data_shreds, mut coding_shreds, leader_schedule_cache) = - setup_erasure_shreds(slot, 0, 100, 1.0); + setup_erasure_shreds(slot, 0, 100); let blockstore_path = get_tmp_ledger_path!(); { let blockstore = Blockstore::open(&blockstore_path).unwrap(); diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 006a8b5975..a54d2c1d07 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -1,4 +1,5 @@ use 
super::*; +use std::time::Instant; #[derive(Default)] pub struct PurgeStats { @@ -31,6 +32,19 @@ impl Blockstore { } } + /// Usually this is paired with .purge_slots() but we can't internally call this in + /// that function unconditionally. That's because set_max_expired_slot() + /// expects to purge older slots by the successive chronological order, while .purge_slots() + /// can also be used to purge *future* slots for --hard-fork thing, preserving older + /// slots. It'd be quite dangerous to purge older slots in that case. + /// So, current legal user of this function is LedgerCleanupService. + pub fn set_max_expired_slot(&self, to_slot: Slot) { + // convert here from inclusive purged range end to inclusive alive range start to align + // with Slot::default() for initial compaction filter behavior consistency + let to_slot = to_slot.checked_add(1).unwrap(); + self.db.set_oldest_slot(to_slot); + } + pub fn purge_and_compact_slots(&self, from_slot: Slot, to_slot: Slot) { self.purge_slots(from_slot, to_slot, PurgeType::Exact); if let Err(e) = self.compact_storage(from_slot, to_slot) { @@ -47,6 +61,10 @@ impl Blockstore { /// /// Dangerous; Use with care pub fn purge_from_next_slots(&self, from_slot: Slot, to_slot: Slot) { + let mut count = 0; + let mut rewritten = 0; + let mut last_print = Instant::now(); + let mut total_retain_us = 0; for (slot, mut meta) in self .slot_meta_iterator(0) .expect("unable to iterate over meta") @@ -55,10 +73,23 @@ impl Blockstore { break; } + count += 1; + if last_print.elapsed().as_millis() > 2000 { + info!( + "purged: {} slots rewritten: {} retain_time: {}us", + count, rewritten, total_retain_us + ); + count = 0; + rewritten = 0; + total_retain_us = 0; + last_print = Instant::now(); + } + let mut time = Measure::start("retain"); let original_len = meta.next_slots.len(); meta.next_slots .retain(|slot| *slot < from_slot || *slot > to_slot); if meta.next_slots.len() != original_len { + rewritten += 1; info!( "purge_from_next_slots: 
meta for slot {} no longer refers to slots {:?}", slot, @@ -70,6 +101,8 @@ impl Blockstore { ) .expect("couldn't update meta"); } + time.stop(); + total_retain_us += time.as_us(); } } @@ -145,6 +178,10 @@ impl Blockstore { & self .db .delete_range_cf::(&mut write_batch, from_slot, to_slot) + .is_ok() + & self + .db + .delete_range_cf::(&mut write_batch, from_slot, to_slot) .is_ok(); let mut w_active_transaction_status_index = self.active_transaction_status_index.write().unwrap(); @@ -160,6 +197,13 @@ impl Blockstore { to_slot, )?; } + PurgeType::CompactionFilter => { + // No explicit action is required here because this purge type completely and + // indefinitely relies on the proper working of compaction filter for those + // special column families, never toggling the primary index from the current + // one. Overall, this enables well uniformly distributed writes, resulting + // in no spiky periodic huge delete_range for them. + } } delete_range_timer.stop(); let mut write_timer = Measure::start("write_batch"); @@ -173,6 +217,10 @@ impl Blockstore { write_timer.stop(); purge_stats.delete_range += delete_range_timer.as_us(); purge_stats.write_batch += write_timer.as_us(); + // only drop w_active_transaction_status_index after we do db.write(write_batch); + // otherwise, readers might be confused with inconsistent state between + // self.active_transaction_status_index and RockDb's TransactionStatusIndex contents + drop(w_active_transaction_status_index); Ok(columns_purged) } @@ -243,6 +291,10 @@ impl Blockstore { && self .perf_samples_cf .compact_range(from_slot, to_slot) + .unwrap_or(false) + && self + .block_height_cf + .compact_range(from_slot, to_slot) .unwrap_or(false); compact_timer.stop(); if !result { @@ -303,18 +355,26 @@ impl Blockstore { w_active_transaction_status_index: &mut u64, to_slot: Slot, ) -> Result<()> { - if let Some(index) = self.toggle_transaction_status_index( + if let Some(purged_index) = self.toggle_transaction_status_index( write_batch, 
w_active_transaction_status_index, to_slot, )? { *columns_purged &= self .db - .delete_range_cf::(write_batch, index, index + 1) + .delete_range_cf::( + write_batch, + purged_index, + purged_index + 1, + ) .is_ok() & self .db - .delete_range_cf::(write_batch, index, index + 1) + .delete_range_cf::( + write_batch, + purged_index, + purged_index + 1, + ) .is_ok(); } Ok(()) diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 7d7c37fa65..20ead78fa3 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -7,9 +7,13 @@ use log::*; use prost::Message; pub use rocksdb::Direction as IteratorDirection; use rocksdb::{ - self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator, DBRecoveryMode, - IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB, + self, + compaction_filter::CompactionFilter, + compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory}, + ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, DBIterator, DBRawIterator, + DBRecoveryMode, IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB, }; + use serde::de::DeserializeOwned; use serde::Serialize; use solana_runtime::hardened_unpack::UnpackError; @@ -19,7 +23,17 @@ use solana_sdk::{ signature::Signature, }; use solana_storage_proto::convert::{generated, generated_evm}; -use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc}; +use std::{ + collections::HashMap, + ffi::{CStr, CString}, + fs, + marker::PhantomData, + path::Path, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; use thiserror::Error; const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB @@ -57,6 +71,11 @@ const REWARDS_CF: &str = "rewards"; const BLOCKTIME_CF: &str = "blocktime"; /// Column family for Performance Samples const PERF_SAMPLES_CF: &str = "perf_samples"; +/// Column family for BlockHeight +const BLOCK_HEIGHT_CF: &str = "block_height"; + +// 1 day is chosen for the same 
reasoning of DEFAULT_COMPACTION_SLOT_INTERVAL +const PERIODIC_COMPACTION_SECONDS: u64 = 60 * 60 * 24; const EVM_HEADERS: &str = "evm_headers"; const EVM_BLOCK_BY_HASH: &str = "evm_block_by_hash"; @@ -82,6 +101,7 @@ pub enum BlockstoreError { ProtobufDecodeError(#[from] prost::DecodeError), ParentEntriesUnavailable, SlotUnavailable, + Other(&'static str), // TODO(velas): remove, use specific error variant } pub type Result = std::result::Result; @@ -159,6 +179,9 @@ pub mod columns { pub struct PerfSamples; #[derive(Debug)] + /// The block height column + pub struct BlockHeight; + /// The evm block header. pub struct EvmBlockHeader; @@ -228,8 +251,30 @@ impl From for DBRecoveryMode { } } +#[derive(Default, Clone, Debug)] +struct OldestSlot(Arc); + +impl OldestSlot { + pub fn set(&self, oldest_slot: Slot) { + // this is independently used for compaction_filter without any data dependency. + // also, compaction_filters are created via its factories, creating short-lived copies of + // this atomic value for the single job of compaction. 
So, Relaxed store can be justified + // in total + self.0.store(oldest_slot, Ordering::Relaxed); + } + + pub fn get(&self) -> Slot { + // copy from the AtomicU64 as a general precaution so that the oldest_slot can not mutate + // across single run of compaction for simpler reasoning although this isn't strict + // requirement at the moment + // also eventual propagation (very Relaxed) load is Ok, because compaction by nature doesn't + // require strictly synchronized semantics in this regard + self.0.load(Ordering::Relaxed) + } +} + #[derive(Debug)] -struct Rocks(rocksdb::DB, ActualAccessType); +struct Rocks(rocksdb::DB, ActualAccessType, OldestSlot); impl Rocks { fn open( @@ -238,9 +283,9 @@ impl Rocks { recovery_mode: Option, ) -> Result { use columns::{ - AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, - PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus, - TransactionStatusIndex, + AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, + Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, + TransactionStatus, TransactionStatusIndex, }; fs::create_dir_all(&path)?; @@ -254,43 +299,88 @@ impl Rocks { db_options.set_wal_recovery_mode(recovery_mode.into()); } + let oldest_slot = OldestSlot::default(); + // Column family names - let meta_cf_descriptor = - ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(&access_type)); - let dead_slots_cf_descriptor = - ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(&access_type)); - let duplicate_slots_cf_descriptor = - ColumnFamilyDescriptor::new(DuplicateSlots::NAME, get_cf_options(&access_type)); - let erasure_meta_cf_descriptor = - ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(&access_type)); - let orphans_cf_descriptor = - ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(&access_type)); - let root_cf_descriptor = - ColumnFamilyDescriptor::new(Root::NAME, 
get_cf_options(&access_type)); - let index_cf_descriptor = - ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(&access_type)); - let shred_data_cf_descriptor = - ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(&access_type)); - let shred_code_cf_descriptor = - ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(&access_type)); - let transaction_status_cf_descriptor = - ColumnFamilyDescriptor::new(TransactionStatus::NAME, get_cf_options(&access_type)); - let address_signatures_cf_descriptor = - ColumnFamilyDescriptor::new(AddressSignatures::NAME, get_cf_options(&access_type)); - let transaction_status_index_cf_descriptor = - ColumnFamilyDescriptor::new(TransactionStatusIndex::NAME, get_cf_options(&access_type)); - let rewards_cf_descriptor = - ColumnFamilyDescriptor::new(Rewards::NAME, get_cf_options(&access_type)); - let blocktime_cf_descriptor = - ColumnFamilyDescriptor::new(Blocktime::NAME, get_cf_options(&access_type)); - let perf_samples_cf_descriptor = - ColumnFamilyDescriptor::new(PerfSamples::NAME, get_cf_options(&access_type)); - let evm_headers_cf_descriptor = - ColumnFamilyDescriptor::new(EvmBlockHeader::NAME, get_cf_options(&access_type)); - let evm_headers_by_hash_cf_descriptor = - ColumnFamilyDescriptor::new(EvmHeaderIndexByHash::NAME, get_cf_options(&access_type)); - let evm_transactions_cf_descriptor = - ColumnFamilyDescriptor::new(EvmTransactionReceipts::NAME, get_cf_options(&access_type)); + let meta_cf_descriptor = ColumnFamilyDescriptor::new( + SlotMeta::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new( + DeadSlots::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let duplicate_slots_cf_descriptor = ColumnFamilyDescriptor::new( + DuplicateSlots::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let erasure_meta_cf_descriptor = ColumnFamilyDescriptor::new( + ErasureMeta::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); 
+ let orphans_cf_descriptor = ColumnFamilyDescriptor::new( + Orphans::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let root_cf_descriptor = ColumnFamilyDescriptor::new( + Root::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let index_cf_descriptor = ColumnFamilyDescriptor::new( + Index::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let shred_data_cf_descriptor = ColumnFamilyDescriptor::new( + ShredData::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let shred_code_cf_descriptor = ColumnFamilyDescriptor::new( + ShredCode::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let transaction_status_cf_descriptor = ColumnFamilyDescriptor::new( + TransactionStatus::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let address_signatures_cf_descriptor = ColumnFamilyDescriptor::new( + AddressSignatures::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let transaction_status_index_cf_descriptor = ColumnFamilyDescriptor::new( + TransactionStatusIndex::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let rewards_cf_descriptor = ColumnFamilyDescriptor::new( + Rewards::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let blocktime_cf_descriptor = ColumnFamilyDescriptor::new( + Blocktime::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let perf_samples_cf_descriptor = ColumnFamilyDescriptor::new( + PerfSamples::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let block_height_cf_descriptor = ColumnFamilyDescriptor::new( + BlockHeight::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + // Don't forget to add to both run_purge_with_stats() and + // compact_storage() in ledger/src/blockstore/blockstore_purge.rs!! 
+ + let evm_headers_cf_descriptor = ColumnFamilyDescriptor::new( + EvmBlockHeader::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let evm_headers_by_hash_cf_descriptor = ColumnFamilyDescriptor::new( + EvmHeaderIndexByHash::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); + let evm_transactions_cf_descriptor = ColumnFamilyDescriptor::new( + EvmTransactionReceipts::NAME, + get_cf_options::(&access_type, &oldest_slot), + ); let cfs = vec![ (SlotMeta::NAME, meta_cf_descriptor), @@ -311,6 +401,8 @@ impl Rocks { (Rewards::NAME, rewards_cf_descriptor), (Blocktime::NAME, blocktime_cf_descriptor), (PerfSamples::NAME, perf_samples_cf_descriptor), + (BlockHeight::NAME, block_height_cf_descriptor), + // EVM tail args (EvmBlockHeader::NAME, evm_headers_cf_descriptor), (EvmTransactionReceipts::NAME, evm_transactions_cf_descriptor), ( @@ -318,18 +410,18 @@ impl Rocks { evm_headers_by_hash_cf_descriptor, ), ]; + let cf_names: Vec<_> = cfs.iter().map(|c| c.0).collect(); // Open the database let db = match access_type { AccessType::PrimaryOnly | AccessType::PrimaryOnlyForMaintenance => Rocks( DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1))?, ActualAccessType::Primary, + oldest_slot, ), AccessType::TryPrimaryThenSecondary => { - let names: Vec<_> = cfs.iter().map(|c| c.0).collect(); - match DB::open_cf_descriptors(&db_options, path, cfs.into_iter().map(|c| c.1)) { - Ok(db) => Rocks(db, ActualAccessType::Primary), + Ok(db) => Rocks(db, ActualAccessType::Primary, oldest_slot), Err(err) => { let secondary_path = path.join("solana-secondary"); @@ -341,22 +433,84 @@ impl Rocks { db_options.set_max_open_files(-1); Rocks( - DB::open_cf_as_secondary(&db_options, path, &secondary_path, names)?, + DB::open_cf_as_secondary( + &db_options, + path, + &secondary_path, + cf_names.clone(), + )?, ActualAccessType::Secondary, + oldest_slot, ) } } } }; + // this is only needed for LedgerCleanupService. so guard with PrimaryOnly (i.e. 
running solana-validator) + if matches!(access_type, AccessType::PrimaryOnly) { + for cf_name in cf_names { + // this special column family must be excluded from LedgerCleanupService's rocksdb + // compactions + if cf_name == TransactionStatusIndex::NAME { + continue; + } + + // This is the crux of our write-stall-free storage cleaning strategy with consistent + // state view for higher-layers + // + // For the consistent view, we commit delete_range on pruned slot range by LedgerCleanupService. + // simple story here. + // + // For actual storage cleaning, we employ RocksDB compaction. But default RocksDB compaction + // settings don't work well for us. That's because we're using it rather like a really big + // (100 GBs) ring-buffer. RocksDB is basically assuming uniform data write over the key space for + // efficient compaction, which isn't true for our use as a ring buffer. + // + // So, we customize the compaction strategy with 2 combined tweaks: + // (1) compaction_filter and (2) shortening its periodic cycles. + // + // Via the compaction_filter, we finally reclaim previously delete_range()-ed storage occupied + // by pruned slots. When compaction_filter is set, each SST files are re-compacted periodically + // to hunt for keys newly expired by the compaction_filter re-evaluation. But RocksDb's default + // `periodic_compaction_seconds` is 30 days, which is too long for our case. So, we + // shorten it to a day (24 hours). + // + // As we write newer SST files over time at rather consistent rate of speed, this + // effectively makes each newly-created ssts be re-compacted for the filter at + // well-dispersed different timings. + // As a whole, we rewrite the whole dataset at every PERIODIC_COMPACTION_SECONDS, + // slowly over the duration of PERIODIC_COMPACTION_SECONDS. So, this results in + // amortization. + // So, there is a bit inefficiency here because we'll rewrite not-so-old SST files + // too. 
But longer period would introduce higher variance of ledger storage sizes over + // the long period. And it's much better than the daily IO spike caused by compact_range() by + // previous implementation. + // + // `ttl` and `compact_range`(`ManualCompaction`), doesn't work nicely. That's + // because its original intention is delete_range()s to reclaim disk space. So it tries to merge + // them with N+1 SST files all way down to the bottommost SSTs, often leading to vastly large amount + // (= all) of invalidated SST files, when combined with newer writes happening at the opposite + // edge of the key space. This causes a long and heavy disk IOs and possible write + // stall and ultimately, the deadly Replay/Banking stage stall at higher layers. + db.0.set_options_cf( + db.cf_handle(cf_name), + &[( + "periodic_compaction_seconds", + &format!("{}", PERIODIC_COMPACTION_SECONDS), + )], + ) + .unwrap(); + } + } Ok(db) } fn columns(&self) -> Vec<&'static str> { use columns::{ - AddressSignatures, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, - PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, TransactionStatus, - TransactionStatusIndex, + AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta, + Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta, + TransactionStatus, TransactionStatusIndex, }; vec![ @@ -375,6 +529,8 @@ impl Rocks { Rewards::NAME, Blocktime::NAME, PerfSamples::NAME, + BlockHeight::NAME, + // EVM scope EvmBlockHeader::NAME, EvmTransactionReceipts::NAME, EvmHeaderIndexByHash::NAME, @@ -446,9 +602,13 @@ pub trait Column { fn key(index: Self::Index) -> Vec; fn index(key: &[u8]) -> Self::Index; - fn primary_index(index: Self::Index) -> Slot; + // this return Slot or some u64 + fn primary_index(index: Self::Index) -> u64; #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> Self::Index; + fn slot(index: Self::Index) -> Slot { + Self::primary_index(index) + } } pub 
trait ColumnName { @@ -490,6 +650,7 @@ impl Column for T { index } + #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> u64 { slot } @@ -521,6 +682,11 @@ impl Column for columns::TransactionStatus { index.0 } + fn slot(index: Self::Index) -> Slot { + index.2 + } + + #[allow(clippy::wrong_self_convention)] fn as_index(index: u64) -> Self::Index { (index, Signature::default(), 0) } @@ -557,6 +723,11 @@ impl Column for columns::AddressSignatures { index.0 } + fn slot(index: Self::Index) -> Slot { + index.2 + } + + #[allow(clippy::wrong_self_convention)] fn as_index(index: u64) -> Self::Index { (index, Pubkey::default(), 0, Signature::default()) } @@ -583,6 +754,11 @@ impl Column for columns::TransactionStatusIndex { index } + fn slot(_index: Self::Index) -> Slot { + unimplemented!() + } + + #[allow(clippy::wrong_self_convention)] fn as_index(slot: u64) -> u64 { slot } @@ -616,6 +792,14 @@ impl TypedColumn for columns::PerfSamples { type Type = blockstore_meta::PerfSample; } +impl SlotColumn for columns::BlockHeight {} +impl ColumnName for columns::BlockHeight { + const NAME: &'static str = BLOCK_HEIGHT_CF; +} +impl TypedColumn for columns::BlockHeight { + type Type = u64; +} + impl Column for columns::ShredCode { type Index = (u64, u64); @@ -631,6 +815,7 @@ impl Column for columns::ShredCode { index.0 } + #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> Self::Index { (slot, 0) } @@ -660,6 +845,7 @@ impl Column for columns::ShredData { index.0 } + #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> Self::Index { (slot, 0) } @@ -738,6 +924,7 @@ impl Column for columns::ErasureMeta { index.0 } + #[allow(clippy::wrong_self_convention)] fn as_index(slot: Slot) -> Self::Index { (slot, 0) } @@ -1039,6 +1226,10 @@ impl Database { pub fn is_primary_access(&self) -> bool { self.backend.is_primary_access() } + + pub fn set_oldest_slot(&self, oldest_slot: Slot) { + self.backend.2.set(oldest_slot); + } } impl LedgerColumn @@ -1225,7 
+1416,63 @@ impl<'a> WriteBatch<'a> { } } -fn get_cf_options(access_type: &AccessType) -> Options { +struct PurgedSlotFilter { + oldest_slot: Slot, + name: CString, + _phantom: PhantomData, +} + +impl CompactionFilter for PurgedSlotFilter { + fn filter(&mut self, _level: u32, key: &[u8], _value: &[u8]) -> CompactionDecision { + use rocksdb::CompactionDecision::*; + + let slot_in_key = C::slot(C::index(key)); + // Refer to a comment about periodic_compaction_seconds, especially regarding implicit + // periodic execution of compaction_filters + if slot_in_key >= self.oldest_slot { + Keep + } else { + Remove + } + } + + fn name(&self) -> &CStr { + &self.name + } +} + +struct PurgedSlotFilterFactory { + oldest_slot: OldestSlot, + name: CString, + _phantom: PhantomData, +} + +impl CompactionFilterFactory for PurgedSlotFilterFactory { + type Filter = PurgedSlotFilter; + + fn create(&mut self, _context: CompactionFilterContext) -> Self::Filter { + let copied_oldest_slot = self.oldest_slot.get(); + PurgedSlotFilter:: { + oldest_slot: copied_oldest_slot, + name: CString::new(format!( + "purged_slot_filter({}, {:?})", + C::NAME, + copied_oldest_slot + )) + .unwrap(), + _phantom: PhantomData::default(), + } + } + + fn name(&self) -> &CStr { + &self.name + } +} + +fn get_cf_options( + access_type: &AccessType, + oldest_slot: &OldestSlot, +) -> Options { let mut options = Options::default(); // 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM options.set_max_write_buffer_number(8); @@ -1239,6 +1486,19 @@ fn get_cf_options(access_type: &AccessType) -> Options { options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32); options.set_max_bytes_for_level_base(total_size_base); options.set_target_file_size_base(file_size_base); + + // TransactionStatusIndex must be excluded from LedgerCleanupService's rocksdb + // compactions.... 
+ if matches!(access_type, AccessType::PrimaryOnly) + && C::NAME != columns::TransactionStatusIndex::NAME + { + options.set_compaction_filter_factory(PurgedSlotFilterFactory:: { + oldest_slot: oldest_slot.clone(), + name: CString::new(format!("purged_slot_filter_factory({})", C::NAME)).unwrap(), + _phantom: PhantomData::default(), + }); + } + if matches!(access_type, AccessType::PrimaryOnlyForMaintenance) { options.set_disable_auto_compactions(true); } @@ -1270,3 +1530,57 @@ fn get_db_options(access_type: &AccessType) -> Options { options } + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::blockstore_db::columns::ShredData; + + #[test] + fn test_compaction_filter() { + // this doesn't implement Clone... + let dummy_compaction_filter_context = || CompactionFilterContext { + is_full_compaction: true, + is_manual_compaction: true, + }; + let oldest_slot = OldestSlot::default(); + + let mut factory = PurgedSlotFilterFactory:: { + oldest_slot: oldest_slot.clone(), + name: CString::new("test compaction filter").unwrap(), + _phantom: PhantomData::default(), + }; + let mut compaction_filter = factory.create(dummy_compaction_filter_context()); + + let dummy_level = 0; + let key = ShredData::key(ShredData::as_index(0)); + let dummy_value = vec![]; + + // we can't use assert_matches! because CompactionDecision doesn't implement Debug + assert!(matches!( + compaction_filter.filter(dummy_level, &key, &dummy_value), + CompactionDecision::Keep + )); + + // mutating oledst_slot doen't affect existing compaction filters... 
+ oldest_slot.set(1); + assert!(matches!( + compaction_filter.filter(dummy_level, &key, &dummy_value), + CompactionDecision::Keep + )); + + // recreating compaction filter starts to expire the key + let mut compaction_filter = factory.create(dummy_compaction_filter_context()); + assert!(matches!( + compaction_filter.filter(dummy_level, &key, &dummy_value), + CompactionDecision::Remove + )); + + // newer key shouldn't be removed + let key = ShredData::key(ShredData::as_index(1)); + matches!( + compaction_filter.filter(dummy_level, &key, &dummy_value), + CompactionDecision::Keep + ); + } +} diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 01df93e85a..00737b3c32 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -51,8 +51,9 @@ pub struct ShredIndex { pub struct ErasureMeta { /// Which erasure set in the slot this is pub set_index: u64, - /// First coding index in the FEC set - pub first_coding_index: u64, + /// Deprecated field. 
+ #[serde(rename = "first_coding_index")] + __unused: u64, /// Size of shards in this erasure set pub size: usize, /// Erasure configuration for this erasure set @@ -184,21 +185,19 @@ impl SlotMeta { } impl ErasureMeta { - pub fn new(set_index: u64, first_coding_index: u64, config: &ErasureConfig) -> ErasureMeta { + pub fn new(set_index: u64, config: ErasureConfig) -> ErasureMeta { ErasureMeta { set_index, - first_coding_index, - size: 0, - config: *config, + config, + ..Self::default() } } pub fn status(&self, index: &Index) -> ErasureMetaStatus { use ErasureMetaStatus::*; - let num_coding = index.coding().present_in_bounds( - self.first_coding_index..self.first_coding_index + self.config.num_coding() as u64, - ); + let coding_indices = self.set_index..self.set_index + self.config.num_coding() as u64; + let num_coding = index.coding().present_in_bounds(coding_indices); let num_data = index .data() .present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64); @@ -263,7 +262,7 @@ mod test { let set_index = 0; let erasure_config = ErasureConfig::default(); - let mut e_meta = ErasureMeta::new(set_index, set_index, &erasure_config); + let mut e_meta = ErasureMeta::new(set_index, erasure_config); let mut rng = thread_rng(); let mut index = Index::new(0); e_meta.size = 1; diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index a36f63fb31..c55ad0dde8 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -16,9 +16,9 @@ use solana_measure::{measure::Measure, thread_mem_usage}; use solana_metrics::{datapoint_error, inc_new_counter_debug}; use solana_rayon_threadlimit::get_thread_count; use solana_runtime::{ - accounts_index::AccountIndex, + accounts_index::AccountSecondaryIndexes, bank::{ - Bank, ExecuteTimings, InnerInstructionsList, TransactionBalancesSet, + Bank, ExecuteTimings, InnerInstructionsList, RentDebits, TransactionBalancesSet, TransactionExecutionResult, 
TransactionLogMessages, TransactionResults, }, bank_forks::BankForks, @@ -101,7 +101,7 @@ fn get_first_error( fn execute_batch( batch: &TransactionBatch, bank: &Arc, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, ) -> Result<()> { @@ -130,6 +130,7 @@ fn execute_batch( let TransactionResults { fee_collection_results, execution_results, + rent_debits, .. } = tx_results; @@ -152,6 +153,7 @@ fn execute_batch( token_balances, inner_instructions, transaction_logs, + rent_debits, ); } @@ -163,7 +165,7 @@ fn execute_batches( bank: &Arc, batches: &[TransactionBatch], entry_callback: Option<&ProcessCallback>, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, ) -> Result<()> { @@ -173,12 +175,12 @@ fn execute_batches( thread_pool.borrow().install(|| { batches .into_par_iter() - .map_with(transaction_status_sender, |sender, batch| { + .map(|batch| { let mut timings = ExecuteTimings::default(); let result = execute_batch( batch, bank, - sender.clone(), + transaction_status_sender, replay_vote_sender, &mut timings, ); @@ -207,19 +209,23 @@ pub fn process_entries( bank: &Arc, entries: &mut [Entry], randomize: bool, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, ) -> Result<()> { + let mut timings = ExecuteTimings::default(); let mut entry_types: Vec<_> = entries.iter().map(EntryType::from).collect(); - process_entries_with_callback( + let result = process_entries_with_callback( bank, &mut entry_types, randomize, None, transaction_status_sender, replay_vote_sender, - &mut ExecuteTimings::default(), - ) + &mut timings, + ); + + debug!("process_entries: {:?}", timings); + result } // Note: If randomize is true this will shuffle 
entries' transactions in-place. @@ -228,7 +234,7 @@ fn process_entries_with_callback( entries: &mut [EntryType], randomize: bool, entry_callback: Option<&ProcessCallback>, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, timings: &mut ExecuteTimings, ) -> Result<()> { @@ -249,7 +255,7 @@ fn process_entries_with_callback( bank, &batches, entry_callback, - transaction_status_sender.clone(), + transaction_status_sender, replay_vote_sender, timings, )?; @@ -300,7 +306,7 @@ fn process_entries_with_callback( bank, &batches, entry_callback, - transaction_status_sender.clone(), + transaction_status_sender, replay_vote_sender, timings, )?; @@ -362,7 +368,7 @@ pub struct ProcessOptions { pub new_hard_forks: Option>, pub frozen_accounts: Vec, pub debug_keys: Option>>, - pub account_indexes: HashSet, + pub account_indexes: AccountSecondaryIndexes, pub accounts_db_caching_enabled: bool, pub allow_dead_slots: bool, } @@ -374,6 +380,7 @@ pub fn process_blockstore( evm_genesis_path: impl AsRef, account_paths: Vec, opts: ProcessOptions, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) -> BlockstoreProcessorResult { if let Some(num_threads) = opts.override_num_threads { PAR_THREAD_POOL.with(|pool| { @@ -398,8 +405,21 @@ pub fn process_blockstore( let bank0 = Arc::new(bank0); info!("processing ledger for slot 0..."); let recyclers = VerifyRecyclers::default(); - process_bank_0(&bank0, blockstore, &opts, &recyclers); - do_process_blockstore_from_root(blockstore, bank0, &opts, &recyclers, None) + process_bank_0( + &bank0, + blockstore, + &opts, + &recyclers, + cache_block_meta_sender, + ); + do_process_blockstore_from_root( + blockstore, + bank0, + &opts, + &recyclers, + None, + cache_block_meta_sender, + ) } // Process blockstore from a known root bank @@ -408,7 +428,8 @@ pub(crate) fn process_blockstore_from_root( bank: Bank, opts: &ProcessOptions, recyclers: &VerifyRecyclers, 
- transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) -> BlockstoreProcessorResult { do_process_blockstore_from_root( blockstore, @@ -416,6 +437,7 @@ pub(crate) fn process_blockstore_from_root( opts, recyclers, transaction_status_sender, + cache_block_meta_sender, ) } @@ -424,7 +446,8 @@ fn do_process_blockstore_from_root( bank: Arc, opts: &ProcessOptions, recyclers: &VerifyRecyclers, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) -> BlockstoreProcessorResult { info!("processing ledger from slot {}...", bank.slot()); let allocated = thread_mem_usage::Allocatedp::default(); @@ -466,6 +489,7 @@ fn do_process_blockstore_from_root( } } + let mut timing = ExecuteTimings::default(); // Iterate and replay slots from blockstore starting from `start_slot` let (initial_forks, leader_schedule_cache) = { if let Some(meta) = blockstore @@ -486,6 +510,8 @@ fn do_process_blockstore_from_root( opts, recyclers, transaction_status_sender, + cache_block_meta_sender, + &mut timing, )?; initial_forks.sort_by_key(|bank| bank.slot()); @@ -503,6 +529,7 @@ fn do_process_blockstore_from_root( } let bank_forks = BankForks::new_from_banks(&initial_forks, root); + info!("ledger processing timing: {:?}", timing); info!( "ledger processed in {}. {} MB allocated. 
root slot is {}, {} fork{} at {}, with {} frozen bank{}", HumanTime::from(chrono::Duration::from_std(now.elapsed()).unwrap()).to_text_en(Accuracy::Precise, Tense::Present), @@ -584,15 +611,16 @@ fn confirm_full_slot( opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut ConfirmationProgress, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, + timing: &mut ExecuteTimings, ) -> result::Result<(), BlockstoreProcessorError> { - let mut timing = ConfirmationTiming::default(); + let mut confirmation_timing = ConfirmationTiming::default(); let skip_verification = !opts.poh_verify; confirm_slot( blockstore, bank, - &mut timing, + &mut confirmation_timing, progress, skip_verification, transaction_status_sender, @@ -602,6 +630,8 @@ fn confirm_full_slot( opts.allow_dead_slots, )?; + timing.accumulate(&confirmation_timing.execute_timings); + if !bank.is_complete() { Err(BlockstoreProcessorError::InvalidBlock( BlockError::Incomplete, @@ -660,7 +690,7 @@ pub fn confirm_slot( timing: &mut ConfirmationTiming, progress: &mut ConfirmationProgress, skip_verification: bool, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, replay_vote_sender: Option<&ReplayVoteSender>, entry_callback: Option<&ProcessCallback>, recyclers: &VerifyRecyclers, @@ -780,6 +810,7 @@ fn process_bank_0( blockstore: &Blockstore, opts: &ProcessOptions, recyclers: &VerifyRecyclers, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, ) { assert_eq!(bank0.slot(), 0); let mut progress = ConfirmationProgress::new(bank0.last_blockhash()); @@ -791,9 +822,11 @@ fn process_bank_0( &mut progress, None, None, + &mut ExecuteTimings::default(), ) .expect("processing for bank 0 must succeed"); bank0.freeze(); + cache_block_meta(bank0, cache_block_meta_sender); } // Given a bank, add its children to the pending slots queue if those children slots are @@ -855,6 
+888,7 @@ fn process_next_slots( // Iterate through blockstore processing slots starting from the root slot pointed to by the // given `meta` and return a vector of frozen bank forks +#[allow(clippy::too_many_arguments)] fn load_frozen_forks( root_bank: &Arc, root_meta: &SlotMeta, @@ -863,7 +897,9 @@ fn load_frozen_forks( root: &mut Slot, opts: &ProcessOptions, recyclers: &VerifyRecyclers, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, + timing: &mut ExecuteTimings, ) -> result::Result>, BlockstoreProcessorError> { let mut initial_forks = HashMap::new(); let mut all_banks = HashMap::new(); @@ -919,8 +955,10 @@ fn load_frozen_forks( opts, recyclers, &mut progress, - transaction_status_sender.clone(), + transaction_status_sender, + cache_block_meta_sender, None, + timing, ) .is_err() { @@ -944,7 +982,7 @@ fn load_frozen_forks( ).and_then(|supermajority_root| { if supermajority_root > *root { // If there's a cluster confirmed root greater than our last - // replayed root, then beccause the cluster confirmed root should + // replayed root, then because the cluster confirmed root should // be descended from our last root, it must exist in `all_banks` let cluster_root_bank = all_banks.get(&supermajority_root).unwrap(); @@ -952,6 +990,21 @@ fn load_frozen_forks( // is drastically wrong assert!(cluster_root_bank.ancestors.contains_key(root)); info!("blockstore processor found new cluster confirmed root: {}, observed in bank: {}", cluster_root_bank.slot(), bank.slot()); + + // Ensure cluster-confirmed root and parents are set as root in blockstore + let mut rooted_slots = vec![]; + let mut new_root_bank = cluster_root_bank.clone(); + loop { + if new_root_bank.slot() == *root { break; } // Found the last root in the chain, yay! 
+ assert!(new_root_bank.slot() > *root); + + rooted_slots.push(new_root_bank.slot()); + // As noted, the cluster confirmed root should be descended from + // our last root; therefore parent should be set + new_root_bank = new_root_bank.parent().unwrap(); + } + inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len()); + blockstore.set_roots(&rooted_slots).expect("Blockstore::set_roots should succeed"); Some(cluster_root_bank) } else { None @@ -1077,12 +1130,14 @@ fn process_single_slot( opts: &ProcessOptions, recyclers: &VerifyRecyclers, progress: &mut ConfirmationProgress, - transaction_status_sender: Option, + transaction_status_sender: Option<&TransactionStatusSender>, + cache_block_meta_sender: Option<&CacheBlockMetaSender>, replay_vote_sender: Option<&ReplayVoteSender>, + timing: &mut ExecuteTimings, ) -> result::Result<(), BlockstoreProcessorError> { // Mark corrupt slots as dead so validators don't replay this slot and // see AlreadyProcessed errors later in ReplayStage - confirm_full_slot(blockstore, bank, opts, recyclers, progress, transaction_status_sender, replay_vote_sender).map_err(|err| { + confirm_full_slot(blockstore, bank, opts, recyclers, progress, transaction_status_sender, replay_vote_sender, timing).map_err(|err| { let slot = bank.slot(); warn!("slot {} failed to verify: {}", slot, err); if blockstore.is_primary_access() { @@ -1096,6 +1151,7 @@ fn process_single_slot( })?; bank.freeze(); // all banks handled by this routine are created from complete slots + cache_block_meta(bank, cache_block_meta_sender); Ok(()) } @@ -1113,6 +1169,7 @@ pub struct TransactionStatusBatch { pub token_balances: TransactionTokenBalancesSet, pub inner_instructions: Option>>, pub transaction_logs: Option>, + pub rent_debits: Vec, } #[derive(Clone)] @@ -1131,6 +1188,7 @@ impl TransactionStatusSender { token_balances: TransactionTokenBalancesSet, inner_instructions: Vec>, transaction_logs: Vec, + rent_debits: Vec, ) { let slot = bank.slot(); 
let (inner_instructions, transaction_logs) = if !self.enable_cpi_and_log_storage { @@ -1148,6 +1206,7 @@ impl TransactionStatusSender { token_balances, inner_instructions, transaction_logs, + rent_debits, })) { trace!( @@ -1170,6 +1229,16 @@ impl TransactionStatusSender { } } +pub type CacheBlockMetaSender = Sender>; + +pub fn cache_block_meta(bank: &Arc, cache_block_meta_sender: Option<&CacheBlockMetaSender>) { + if let Some(cache_block_meta_sender) = cache_block_meta_sender { + cache_block_meta_sender + .send(bank.clone()) + .unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err)); + } +} + // used for tests only pub fn fill_blockstore_slot_with_ticks( blockstore: &Blockstore, @@ -1217,7 +1286,7 @@ pub mod tests { self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }; use solana_sdk::{ - account::Account, + account::{AccountSharedData, WritableAccount}, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey, @@ -1284,6 +1353,7 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }, + None, ) .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); @@ -1336,6 +1406,7 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }, + None, ) .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); @@ -1360,6 +1431,7 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }, + None, ) .unwrap(); @@ -1414,11 +1486,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -1426,6 +1500,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); @@ -1496,6 +1571,7 @@ pub mod tests { genesis_config 
.generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -1503,6 +1579,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -1524,7 +1601,6 @@ pub mod tests { // Slot 0 should not show up in the ending bank_forks_info let evm_state_dir = TempDir::new().unwrap(); - let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) @@ -1537,6 +1613,7 @@ pub mod tests { &evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -1604,8 +1681,10 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); + genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); @@ -1616,6 +1695,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -1695,11 +1775,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -1707,6 +1789,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -1775,6 +1858,7 @@ pub mod tests { evm_genesis_path, Vec::new(), ProcessOptions::default(), + None, ) .unwrap(); @@ -1831,6 +1915,7 @@ pub mod tests { evm_genesis_path, Vec::new(), ProcessOptions::default(), + None, ) .unwrap(); @@ -1891,6 +1976,7 @@ pub mod tests { evm_genesis_path, Vec::new(), ProcessOptions::default(), + None, ) .unwrap(); @@ -1940,11 +2026,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = 
ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -1952,6 +2040,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -2096,11 +2185,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -2108,6 +2199,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -2137,11 +2229,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -2149,6 +2243,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -2167,11 +2262,13 @@ pub mod tests { override_num_threads: Some(1), ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + process_blockstore( &genesis_config, &blockstore, @@ -2179,6 +2276,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); PAR_THREAD_POOL.with(|pool| { @@ -2196,11 +2294,13 @@ pub mod tests { full_leader_cache: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) 
.unwrap(); + let (_bank_forks, leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -2208,6 +2308,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); assert_eq!(leader_schedule.max_schedules(), std::usize::MAX); @@ -2269,11 +2370,13 @@ pub mod tests { entry_callback: Some(entry_callback), ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + process_blockstore( &genesis_config, &blockstore, @@ -2281,6 +2384,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); assert_eq!(*callback_counter.write().unwrap(), 2); @@ -2654,7 +2758,7 @@ pub mod tests { let mut hash = bank.last_blockhash(); let present_account_key = Keypair::new(); - let present_account = Account::new(1, 10, &Pubkey::default()); + let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let mut entries: Vec<_> = (0..NUM_TRANSFERS) @@ -2935,6 +3039,7 @@ pub mod tests { dev_halt_at_slot: Some(0), ..ProcessOptions::default() }; + let evm_state_path = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config @@ -2948,6 +3053,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -2999,7 +3105,7 @@ pub mod tests { ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); - process_bank_0(&bank0, &blockstore, &opts, &recyclers); + process_bank_0(&bank0, &blockstore, &opts, &recyclers, None); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); confirm_full_slot( &blockstore, @@ -3009,13 +3115,15 @@ pub mod tests { &mut ConfirmationProgress::new(bank0.last_blockhash()), None, None, + &mut ExecuteTimings::default(), ) .unwrap(); bank1.squash(); // Test 
process_blockstore_from_root() from slot 1 onwards let (bank_forks, _leader_schedule) = - do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None).unwrap(); + do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None, None) + .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]); assert_eq!(bank_forks.working_bank().slot(), 6); @@ -3060,7 +3168,7 @@ pub mod tests { } let present_account_key = Keypair::new(); - let present_account = Account::new(1, 10, &Pubkey::default()); + let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let mut i = 0; @@ -3186,7 +3294,7 @@ pub mod tests { &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); *bank.epoch_schedule() @@ -3225,7 +3333,7 @@ pub mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let present_account_key = Keypair::new(); - let present_account = Account::new(1, 10, &Pubkey::default()); + let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let keypair = Keypair::new(); @@ -3442,11 +3550,13 @@ pub mod tests { poh_verify: true, ..ProcessOptions::default() }; + let evm_state_dir = TempDir::new().unwrap(); let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -3454,6 +3564,7 @@ pub mod tests { evm_genesis_path, Vec::new(), opts.clone(), + None, ) .unwrap(); @@ -3491,6 +3602,7 @@ pub mod tests { genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -3498,6 +3610,7 @@ pub mod tests { &evm_genesis_path, Vec::new(), opts.clone(), + None, ) .unwrap(); @@ -3554,11 +3667,11 @@ pub 
mod tests { ); let evm_state_dir = TempDir::new().unwrap(); - let evm_genesis_path = ledger_path.join(solana_sdk::genesis_config::EVM_GENESIS); genesis_config .generate_evm_state(&ledger_path, None) .unwrap(); + let (bank_forks, _leader_schedule) = process_blockstore( &genesis_config, &blockstore, @@ -3566,6 +3679,7 @@ pub mod tests { &evm_genesis_path, Vec::new(), opts, + None, ) .unwrap(); @@ -3592,10 +3706,13 @@ pub mod tests { .map(|(root, stake)| { let mut vote_state = VoteState::default(); vote_state.root_slot = Some(root); - let mut vote_account = - Account::new(1, VoteState::size_of(), &solana_vote_program::id()); + let mut vote_account = AccountSharedData::new( + 1, + VoteState::size_of(), + &solana_vote_program::id(), + ); let versioned = VoteStateVersions::new_current(vote_state); - VoteState::serialize(&versioned, &mut vote_account.data).unwrap(); + VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap(); ( solana_sdk::pubkey::new_rand(), (stake, ArcVoteAccount::from(vote_account)), diff --git a/ledger/src/erasure.rs b/ledger/src/erasure.rs index a1322724c3..455fdbcf64 100644 --- a/ledger/src/erasure.rs +++ b/ledger/src/erasure.rs @@ -104,10 +104,12 @@ impl Session { } /// Create coding blocks by overwriting `parity` - pub fn encode(&self, data: &[&[u8]], parity: &mut [&mut [u8]]) -> Result<()> { - self.0.encode_sep(data, parity)?; - - Ok(()) + pub fn encode(&self, data: &[T], parity: &mut [U]) -> Result<()> + where + T: AsRef<[u8]>, + U: AsRef<[u8]> + AsMut<[u8]>, + { + self.0.encode_sep(data, parity) } /// Recover data + coding blocks into data blocks diff --git a/ledger/src/leader_schedule.rs b/ledger/src/leader_schedule.rs index d2cc19e256..4ba071abcd 100644 --- a/ledger/src/leader_schedule.rs +++ b/ledger/src/leader_schedule.rs @@ -33,10 +33,8 @@ impl LeaderSchedule { .map(|i| { if i % repeat == 0 { current_node = ids[weighted_index.sample(rng)]; - current_node - } else { - current_node } + current_node }) .collect(); 
Self::new_from_schedule(slot_leaders) diff --git a/ledger/src/poh.rs b/ledger/src/poh.rs index 7edc629f59..c230afc8b9 100644 --- a/ledger/src/poh.rs +++ b/ledger/src/poh.rs @@ -8,6 +8,9 @@ pub struct Poh { num_hashes: u64, hashes_per_tick: u64, remaining_hashes: u64, + ticks_per_slot: u64, + tick_number: u64, + slot_start_time: Instant, } #[derive(Debug)] @@ -18,23 +21,47 @@ pub struct PohEntry { impl Poh { pub fn new(hash: Hash, hashes_per_tick: Option) -> Self { + Self::new_with_slot_info(hash, hashes_per_tick, 0, 0) + } + + pub fn new_with_slot_info( + hash: Hash, + hashes_per_tick: Option, + ticks_per_slot: u64, + tick_number: u64, + ) -> Self { let hashes_per_tick = hashes_per_tick.unwrap_or(std::u64::MAX); assert!(hashes_per_tick > 1); + let now = Instant::now(); Poh { hash, num_hashes: 0, hashes_per_tick, remaining_hashes: hashes_per_tick, + ticks_per_slot, + tick_number, + slot_start_time: now, } } pub fn reset(&mut self, hash: Hash, hashes_per_tick: Option) { - let mut poh = Poh::new(hash, hashes_per_tick); + // retains ticks_per_slot: this cannot change without restarting the validator + let tick_number = 0; + let mut poh = + Poh::new_with_slot_info(hash, hashes_per_tick, self.ticks_per_slot, tick_number); std::mem::swap(&mut poh, self); } + pub fn target_poh_time(&self, target_ns_per_tick: u64) -> Instant { + assert!(self.hashes_per_tick > 0); + let offset_tick_ns = target_ns_per_tick * self.tick_number; + let offset_ns = target_ns_per_tick * self.num_hashes / self.hashes_per_tick; + self.slot_start_time + Duration::from_nanos(offset_ns + offset_tick_ns) + } + pub fn hash(&mut self, max_num_hashes: u64) -> bool { let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); + for _ in 0..num_hashes { self.hash = hash(&self.hash.as_ref()); } @@ -75,6 +102,7 @@ impl Poh { let num_hashes = self.num_hashes; self.remaining_hashes = self.hashes_per_tick; self.num_hashes = 0; + self.tick_number += 1; Some(PohEntry { num_hashes, hash: self.hash, @@ 
-102,6 +130,7 @@ mod tests { use crate::poh::{Poh, PohEntry}; use matches::assert_matches; use solana_sdk::hash::{hash, hashv, Hash}; + use std::time::Duration; fn verify(initial_hash: Hash, entries: &[(PohEntry, Option)]) -> bool { let mut current_hash = initial_hash; @@ -124,6 +153,42 @@ mod tests { true } + #[test] + fn test_target_poh_time() { + let zero = Hash::default(); + for target_ns_per_tick in 10..12 { + let mut poh = Poh::new(zero, None); + assert_eq!(poh.target_poh_time(target_ns_per_tick), poh.slot_start_time); + poh.tick_number = 2; + assert_eq!( + poh.target_poh_time(target_ns_per_tick), + poh.slot_start_time + Duration::from_nanos(target_ns_per_tick * 2) + ); + let mut poh = Poh::new(zero, Some(5)); + assert_eq!(poh.target_poh_time(target_ns_per_tick), poh.slot_start_time); + poh.tick_number = 2; + assert_eq!( + poh.target_poh_time(target_ns_per_tick), + poh.slot_start_time + Duration::from_nanos(target_ns_per_tick * 2) + ); + poh.num_hashes = 3; + assert_eq!( + poh.target_poh_time(target_ns_per_tick), + poh.slot_start_time + + Duration::from_nanos(target_ns_per_tick * 2 + target_ns_per_tick * 3 / 5) + ); + } + } + + #[test] + #[should_panic(expected = "assertion failed: hashes_per_tick > 1")] + fn test_target_poh_time_hashes_per_tick() { + let zero = Hash::default(); + let poh = Poh::new(zero, Some(0)); + let target_ns_per_tick = 10; + poh.target_poh_time(target_ns_per_tick); + } + #[test] fn test_poh_verify() { let zero = Hash::default(); diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 57eb2dbf7b..5a21ffeedb 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -91,7 +91,6 @@ pub const DATA_SHRED: u8 = 0b1010_0101; pub const CODING_SHRED: u8 = 0b0101_1010; pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32; -pub const RECOMMENDED_FEC_RATE: f32 = 1.0; pub const SHRED_TICK_REFERENCE_MASK: u8 = 0b0011_1111; const LAST_SHRED_IN_SLOT: u8 = 0b1000_0000; @@ -264,20 +263,18 @@ impl Shred { } pub fn new_from_serialized_shred(mut 
payload: Vec) -> Result { + // A shred can be deserialized in several cases; payload length will vary for these: + // payload.len() <= SHRED_PAYLOAD_SIZE when payload is retrieved from the blockstore + // payload.len() == SHRED_PAYLOAD_SIZE when payload is from a local shred (shred.payload) + // payload.len() > PACKET_DATA_SIZE when payload comes from a packet (window serivce) + // Resize here so the shreds always have the same length + payload.resize(SHRED_PAYLOAD_SIZE, 0); + let mut start = 0; let common_header: ShredCommonHeader = Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?; let slot = common_header.slot; - let expected_data_size = SHRED_PAYLOAD_SIZE; - // Safe because any payload from the network must have passed through - // window service, which implies payload wll be of size - // PACKET_DATA_SIZE, and `expected_data_size` <= PACKET_DATA_SIZE. - // - // On the other hand, if this function is called locally, the payload size should match - // the `expected_data_size`. 
- assert!(payload.len() >= expected_data_size); - payload.truncate(expected_data_size); let shred = if common_header.shred_type == ShredType(CODING_SHRED) { let coding_header: CodingShredHeader = Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?; @@ -519,7 +516,6 @@ pub struct Shredder { pub slot: Slot, pub parent_slot: Slot, version: u16, - fec_rate: f32, keypair: Arc, pub signing_coding_time: u128, reference_tick: u8, @@ -529,21 +525,16 @@ impl Shredder { pub fn new( slot: Slot, parent_slot: Slot, - fec_rate: f32, keypair: Arc, reference_tick: u8, version: u16, ) -> Result { - #[allow(clippy::manual_range_contains)] - if fec_rate > 1.0 || fec_rate < 0.0 { - Err(ShredError::InvalidFecRate(fec_rate)) - } else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) { + if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) { Err(ShredError::SlotTooLow { slot, parent_slot }) } else { Ok(Self { slot, parent_slot, - fec_rate, keypair, signing_coding_time: 0, reference_tick, @@ -569,7 +560,7 @@ impl Shredder { let coding_shreds = Self::data_shreds_to_coding_shreds( self.keypair.deref(), &data_shreds, - self.fec_rate, + is_last_in_slot, &mut stats, ) .unwrap(); @@ -647,12 +638,9 @@ impl Shredder { pub fn data_shreds_to_coding_shreds( keypair: &Keypair, data_shreds: &[Shred], - fec_rate: f32, + is_last_in_slot: bool, process_stats: &mut ProcessShredsStats, ) -> Result> { - if !(0.0..=1.0).contains(&fec_rate) { - return Err(ShredError::InvalidFecRate(fec_rate)); - } if data_shreds.is_empty() { return Ok(Vec::default()); } @@ -663,11 +651,7 @@ impl Shredder { data_shreds .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) .flat_map(|shred_data_batch| { - Shredder::generate_coding_shreds( - fec_rate, - shred_data_batch, - shred_data_batch.len(), // max_coding_shreds - ) + Shredder::generate_coding_shreds(shred_data_batch, is_last_in_slot) }) .collect() }) @@ -725,100 +709,53 @@ impl Shredder { } /// Generates coding 
shreds for the data shreds in the current FEC set - pub fn generate_coding_shreds( - fec_rate: f32, - data_shred_batch: &[Shred], - max_coding_shreds: usize, - ) -> Vec { - assert!(!data_shred_batch.is_empty()); - if fec_rate != 0.0 { - let num_data = data_shred_batch.len(); - // always generate at least 1 coding shred even if the fec_rate doesn't allow it - let num_coding = - Self::calculate_num_coding_shreds(num_data, fec_rate, max_coding_shreds); - let session = - Session::new(num_data, num_coding).expect("Failed to create erasure session"); - let ShredCommonHeader { - slot, - index: start_index, - version, - fec_set_index, - .. - } = data_shred_batch[0].common_header; - assert_eq!(fec_set_index, start_index); - assert!(data_shred_batch - .iter() - .all(|shred| shred.common_header.slot == slot - && shred.common_header.version == version - && shred.common_header.fec_set_index == fec_set_index)); - // All information after coding shred field in a data shred is encoded - let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL; - let data_ptrs: Vec<_> = data_shred_batch - .iter() - .map(|data| &data.payload[..valid_data_len]) - .collect(); - - // Create empty coding shreds, with correctly populated headers - let mut coding_shreds: Vec<_> = (0..num_coding) - .map(|i| { - Shred::new_empty_coding( - slot, - start_index + i as u32, - fec_set_index, - num_data, - num_coding, - i, // position - version, - ) - .payload - }) - .collect(); - - // Grab pointers for the coding blocks - let coding_block_offset = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER; - let mut coding_ptrs: Vec<_> = coding_shreds - .iter_mut() - .map(|buffer| &mut buffer[coding_block_offset..]) - .collect(); - - // Create coding blocks - session - .encode(&data_ptrs, coding_ptrs.as_mut_slice()) - .expect("Failed in erasure encode"); - - // append to the shred list - coding_shreds - .into_iter() - .enumerate() - .map(|(i, payload)| { - let mut shred = Shred::new_empty_coding( 
- slot, - start_index + i as u32, - start_index, - num_data, - num_coding, - i, - version, - ); - shred.payload = payload; - shred - }) - .collect() - } else { - vec![] - } - } - - fn calculate_num_coding_shreds( - num_data_shreds: usize, - fec_rate: f32, - max_coding_shreds: usize, - ) -> usize { - if num_data_shreds == 0 { - 0 + pub fn generate_coding_shreds(data: &[Shred], is_last_in_slot: bool) -> Vec { + const PAYLOAD_ENCODE_SIZE: usize = SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL; + let ShredCommonHeader { + slot, + index, + version, + fec_set_index, + .. + } = data.first().unwrap().common_header; + assert_eq!(fec_set_index, index); + assert!(data.iter().all(|shred| shred.common_header.slot == slot + && shred.common_header.version == version + && shred.common_header.fec_set_index == fec_set_index)); + let num_data = data.len(); + let num_coding = if is_last_in_slot { + (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) + .saturating_sub(num_data) + .max(num_data) } else { - max_coding_shreds.min(1.max((fec_rate * num_data_shreds as f32) as usize)) - } + num_data + }; + let data: Vec<_> = data + .iter() + .map(|shred| &shred.payload[..PAYLOAD_ENCODE_SIZE]) + .collect(); + let mut parity = vec![vec![0u8; PAYLOAD_ENCODE_SIZE]; num_coding]; + Session::new(num_data, num_coding) + .unwrap() + .encode(&data, &mut parity[..]) + .unwrap(); + parity + .iter() + .enumerate() + .map(|(i, parity)| { + let mut shred = Shred::new_empty_coding( + slot, + fec_set_index + i as u32, // shred index + fec_set_index, + num_data, + num_coding, + i, // position + version, + ); + shred.payload[SIZE_OF_DATA_SHRED_IGNORED_TAIL..].copy_from_slice(parity); + shred + }) + .collect() } fn fill_in_missing_shreds( @@ -856,7 +793,6 @@ impl Shredder { num_data: usize, num_coding: usize, first_index: usize, - first_code_index: usize, slot: Slot, ) -> std::result::Result, reed_solomon_erasure::Error> { Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?; @@ -870,8 +806,8 
@@ impl Shredder { let mut shred_bufs: Vec> = shreds .into_iter() .flat_map(|shred| { - let index = - Self::get_shred_index(&shred, num_data, first_index, first_code_index); + let offset = if shred.is_data() { 0 } else { num_data }; + let index = offset + shred.index() as usize; let mut blocks = Self::fill_in_missing_shreds( num_data, num_coding, @@ -949,50 +885,36 @@ impl Shredder { /// Combines all shreds to recreate the original buffer pub fn deshred(shreds: &[Shred]) -> std::result::Result, reed_solomon_erasure::Error> { - let num_data = shreds.len(); + use reed_solomon_erasure::Error::TooFewDataShards; + const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER; Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?; - let data_shred_bufs = { - let first_index = shreds.first().unwrap().index() as usize; - let last_shred = shreds.last().unwrap(); - let last_index = if last_shred.data_complete() || last_shred.last_in_slot() { - last_shred.index() as usize - } else { - 0 - }; - - if num_data.saturating_add(first_index) != last_index.saturating_add(1) { - return Err(reed_solomon_erasure::Error::TooFewDataShards); - } - - shreds.iter().map(|shred| &shred.payload).collect() + let index = shreds.first().ok_or(TooFewDataShards)?.index(); + let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i); + let data_complete = { + let shred = shreds.last().unwrap(); + shred.data_complete() || shred.last_in_slot() }; - - Ok(Self::reassemble_payload(num_data, data_shred_bufs)) - } - - fn get_shred_index( - shred: &Shred, - num_data: usize, - first_data_index: usize, - first_code_index: usize, - ) -> usize { - if shred.is_data() { - shred.index() as usize - } else { - shred.index() as usize + num_data + first_data_index - first_code_index + if !data_complete || !aligned { + return Err(TooFewDataShards); } - } - - fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec>) -> Vec { - let valid_data_len = 
SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL; - data_shred_bufs[..num_data] + let data: Vec<_> = shreds .iter() - .flat_map(|data| { - let offset = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER; - data[offset..valid_data_len].iter() + .flat_map(|shred| { + let size = shred.data_header.size as usize; + let size = shred.payload.len().min(size); + let offset = SHRED_DATA_OFFSET.min(size); + shred.payload[offset..size].iter() }) - .cloned() - .collect() + .copied() + .collect(); + if data.is_empty() { + // For backward compatibility. This is needed when the data shred + // payload is None, so that deserializing to Vec results in + // an empty vector. + Ok(vec![0u8; SIZE_OF_DATA_SHRED_PAYLOAD]) + } else { + Ok(data) + } } fn verify_consistent_shred_payload_sizes( @@ -1136,8 +1058,12 @@ pub mod tests { use super::*; use bincode::serialized_size; use matches::assert_matches; - use solana_sdk::{hash::hash, shred_version, system_transaction}; - use std::{collections::HashSet, convert::TryInto}; + use rand::{seq::SliceRandom, Rng}; + use solana_sdk::{ + hash::{self, hash}, + shred_version, system_transaction, + }; + use std::{collections::HashSet, convert::TryInto, iter::repeat_with}; #[test] fn test_shred_constants() { @@ -1192,7 +1118,7 @@ pub mod tests { // Test that parent cannot be > current slot assert_matches!( - Shredder::new(slot, slot + 1, 1.00, keypair.clone(), 0, 0), + Shredder::new(slot, slot + 1, keypair.clone(), 0, 0), Err(ShredError::SlotTooLow { slot: _, parent_slot: _, @@ -1200,18 +1126,14 @@ pub mod tests { ); // Test that slot - parent cannot be > u16 MAX assert_matches!( - Shredder::new(slot, slot - 1 - 0xffff, 1.00, keypair.clone(), 0, 0), + Shredder::new(slot, slot - 1 - 0xffff, keypair.clone(), 0, 0), Err(ShredError::SlotTooLow { slot: _, parent_slot: _, }) ); - - let fec_rate = 0.25; let parent_slot = slot - 5; - let shredder = Shredder::new(slot, parent_slot, fec_rate, keypair.clone(), 0, 0) - .expect("Failed in creating 
shredder"); - + let shredder = Shredder::new(slot, parent_slot, keypair.clone(), 0, 0).unwrap(); let entries: Vec<_> = (0..5) .map(|_| { let keypair0 = Keypair::new(); @@ -1223,14 +1145,12 @@ pub mod tests { .collect(); let size = serialized_size(&entries).unwrap(); - let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64; - let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size; - let num_expected_coding_shreds = Shredder::calculate_num_coding_shreds( - num_expected_data_shreds as usize, - fec_rate, - num_expected_data_shreds as usize, - ); - + // Integer division to ensure we have enough shreds to fit all the data + let payload_capacity = SIZE_OF_DATA_SHRED_PAYLOAD as u64; + let num_expected_data_shreds = (size + payload_capacity - 1) / payload_capacity; + let num_expected_coding_shreds = (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) + .saturating_sub(num_expected_data_shreds as usize) + .max(num_expected_data_shreds as usize); let start_index = 0; let (data_shreds, coding_shreds, next_index) = shredder.entries_to_shreds(&entries, true, start_index); @@ -1290,11 +1210,8 @@ pub mod tests { fn test_deserialize_shred_payload() { let keypair = Arc::new(Keypair::new()); let slot = 1; - let parent_slot = 0; - let shredder = Shredder::new(slot, parent_slot, 0.0, keypair, 0, 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, parent_slot, keypair, 0, 0).unwrap(); let entries: Vec<_> = (0..5) .map(|_| { let keypair0 = Keypair::new(); @@ -1316,11 +1233,8 @@ pub mod tests { fn test_shred_reference_tick() { let keypair = Arc::new(Keypair::new()); let slot = 1; - let parent_slot = 0; - let shredder = Shredder::new(slot, parent_slot, 0.0, keypair, 5, 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, parent_slot, keypair, 5, 0).unwrap(); let entries: Vec<_> = (0..5) .map(|_| { let keypair0 = Keypair::new(); @@ -1346,11 +1260,8 @@ pub mod tests { fn test_shred_reference_tick_overflow() { let 
keypair = Arc::new(Keypair::new()); let slot = 1; - let parent_slot = 0; - let shredder = Shredder::new(slot, parent_slot, 0.0, keypair, u8::max_value(), 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, parent_slot, keypair, u8::max_value(), 0).unwrap(); let entries: Vec<_> = (0..5) .map(|_| { let keypair0 = Keypair::new(); @@ -1380,16 +1291,7 @@ pub mod tests { fn run_test_data_and_code_shredder(slot: Slot) { let keypair = Arc::new(Keypair::new()); - - // Test that FEC rate cannot be > 1.0 - assert_matches!( - Shredder::new(slot, slot - 5, 1.001, keypair.clone(), 0, 0), - Err(ShredError::InvalidFecRate(_)) - ); - - let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, slot - 5, keypair.clone(), 0, 0).unwrap(); // Create enough entries to make > 1 shred let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD; let num_entries = max_ticks_per_n_shreds(1, Some(no_header_size)) + 1; @@ -1428,11 +1330,9 @@ pub mod tests { run_test_data_and_code_shredder(0x1234_5678_9abc_def0); } - fn run_test_recovery_and_reassembly(slot: Slot) { + fn run_test_recovery_and_reassembly(slot: Slot, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); - let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, slot - 5, keypair.clone(), 0, 0).unwrap(); let keypair0 = Keypair::new(); let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); @@ -1453,11 +1353,24 @@ pub mod tests { .collect(); let serialized_entries = bincode::serialize(&entries).unwrap(); - let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0); + let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds( + &entries, + is_last_in_slot, + 0, // next_shred_index + ); let num_coding_shreds = 
coding_shreds.len(); // We should have 10 shreds now, an equal number of coding shreds assert_eq!(data_shreds.len(), num_data_shreds); + if is_last_in_slot { + assert_eq!( + num_coding_shreds, + 2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - num_data_shreds + ); + } else { + // and an equal number of coding shreds + assert_eq!(num_data_shreds, num_coding_shreds); + } let all_shreds = data_shreds .iter() @@ -1472,7 +1385,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 0, - 0, slot ), Err(reed_solomon_erasure::Error::TooFewShardsPresent) @@ -1484,7 +1396,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 0, - 0, slot, ) .unwrap(); @@ -1502,7 +1413,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 0, - 0, slot, ) .unwrap(); @@ -1550,7 +1460,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 0, - 0, slot, ) .unwrap(); @@ -1558,6 +1467,7 @@ pub mod tests { assert_eq!(recovered_data.len(), 3); // Data shreds 0, 2, 4 were missing for (i, recovered_shred) in recovered_data.into_iter().enumerate() { let index = i * 2; + let is_last_data = recovered_shred.index() as usize == num_data_shreds - 1; verify_test_data_shred( &recovered_shred, index.try_into().unwrap(), @@ -1565,8 +1475,8 @@ pub mod tests { slot - 5, &keypair.pubkey(), true, - recovered_shred.index() as usize == num_data_shreds - 1, - recovered_shred.index() as usize == num_data_shreds - 1, + is_last_data && is_last_in_slot, + is_last_data, ); shred_info.insert(i * 2, recovered_shred); @@ -1622,7 +1532,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 25, - 25, slot, ) .unwrap(); @@ -1654,7 +1563,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 25, - 25, slot + 1, ) .unwrap(); @@ -1667,7 +1575,6 @@ pub mod tests { num_data_shreds, num_coding_shreds, 15, - 15, slot, ), Err(reed_solomon_erasure::Error::TooFewShardsPresent) @@ -1675,14 +1582,88 @@ pub mod tests { // Test8: Try recovery/reassembly with incorrect index. 
Hint: does not recover any shreds assert_matches!( - Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, 35, slot,), + Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, slot), Err(reed_solomon_erasure::Error::TooFewShardsPresent) ); } #[test] fn test_recovery_and_reassembly() { - run_test_recovery_and_reassembly(0x1234_5678_9abc_def0); + run_test_recovery_and_reassembly(0x1234_5678_9abc_def0, false); + run_test_recovery_and_reassembly(0x1234_5678_9abc_def0, true); + } + + fn run_recovery_with_expanded_coding_shreds(num_tx: usize, is_last_in_slot: bool) { + let mut rng = rand::thread_rng(); + let txs = repeat_with(|| { + system_transaction::transfer( + &Keypair::new(), // from + &Pubkey::new_unique(), // to + rng.gen(), // lamports + hash::new_rand(&mut rng), // recent block hash + ) + }) + .take(num_tx) + .collect(); + let entry = Entry::new( + &hash::new_rand(&mut rng), // prev hash + rng.gen_range(1, 64), // num hashes + txs, + ); + let keypair = Arc::new(Keypair::new()); + let slot = 71489660; + let shredder = Shredder::new( + slot, + slot - rng.gen_range(1, 27), // parent slot + keypair, + 0, // reference tick + rng.gen(), // version + ) + .unwrap(); + let next_shred_index = rng.gen_range(1, 1024); + let (data_shreds, coding_shreds, _) = + shredder.entries_to_shreds(&[entry], is_last_in_slot, next_shred_index); + let num_data_shreds = data_shreds.len(); + let num_coding_shreds = coding_shreds.len(); + let mut shreds = coding_shreds; + shreds.extend(data_shreds.iter().cloned()); + shreds.shuffle(&mut rng); + shreds.truncate(num_data_shreds); + shreds.sort_by_key(|shred| { + if shred.is_data() { + shred.index() + } else { + shred.index() + num_data_shreds as u32 + } + }); + let exclude: HashSet<_> = shreds + .iter() + .filter(|shred| shred.is_data()) + .map(|shred| shred.index()) + .collect(); + let recovered_shreds = Shredder::try_recovery( + shreds, + num_data_shreds, + num_coding_shreds, + next_shred_index as 
usize, // first index + slot, + ) + .unwrap(); + assert_eq!( + recovered_shreds, + data_shreds + .into_iter() + .filter(|shred| !exclude.contains(&shred.index())) + .collect::>() + ); + } + + #[test] + fn test_recovery_with_expanded_coding_shreds() { + for num_tx in 0..100 { + run_recovery_with_expanded_coding_shreds(num_tx, false); + run_recovery_with_expanded_coding_shreds(num_tx, true); + } } #[test] @@ -1691,9 +1672,7 @@ pub mod tests { let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); assert_ne!(version, 0); - let shredder = - Shredder::new(0, 0, 1.0, keypair, 0, version).expect("Failed in creating shredder"); - + let shredder = Shredder::new(0, 0, keypair, 0, version).unwrap(); let entries: Vec<_> = (0..5) .map(|_| { let keypair0 = Keypair::new(); @@ -1741,9 +1720,7 @@ pub mod tests { let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); assert_ne!(version, 0); - let shredder = - Shredder::new(0, 0, 0.5, keypair, 0, version).expect("Failed in creating shredder"); - + let shredder = Shredder::new(0, 0, keypair, 0, version).unwrap(); let entries: Vec<_> = (0..500) .map(|_| { let keypair0 = Keypair::new(); @@ -1765,10 +1742,10 @@ pub mod tests { }); coding_shreds.iter().enumerate().for_each(|(i, s)| { - // There will be half the number of coding shreds, as FEC rate is 0.5 - // So multiply i with 2 - let expected_fec_set_index = - start_index + ((i * 2 / max_per_block) * max_per_block) as u32; + let mut expected_fec_set_index = start_index + (i - i % max_per_block) as u32; + while expected_fec_set_index as usize > data_shreds.len() { + expected_fec_set_index -= max_per_block as u32; + } assert_eq!(s.common_header.fec_set_index, expected_fec_set_index); }); } @@ -1779,9 +1756,7 @@ pub mod tests { let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); assert_ne!(version, 0); - let shredder = - Shredder::new(0, 0, 1.0, keypair, 0, 
version).expect("Failed in creating shredder"); - + let shredder = Shredder::new(0, 0, keypair, 0, version).unwrap(); let entries: Vec<_> = (0..500) .map(|_| { let keypair0 = Keypair::new(); @@ -1808,17 +1783,28 @@ pub mod tests { let coding_shreds = Shredder::data_shreds_to_coding_shreds( shredder.keypair.deref(), &data_shreds[..count], - shredder.fec_rate, + false, // is_last_in_slot &mut stats, ) .unwrap(); assert_eq!(coding_shreds.len(), count); + let coding_shreds = Shredder::data_shreds_to_coding_shreds( + shredder.keypair.deref(), + &data_shreds[..count], + true, // is_last_in_slot + &mut stats, + ) + .unwrap(); + assert_eq!( + coding_shreds.len(), + 2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - count + ); }); let coding_shreds = Shredder::data_shreds_to_coding_shreds( shredder.keypair.deref(), &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1], - shredder.fec_rate, + false, // is_last_in_slot &mut stats, ) .unwrap(); @@ -1826,6 +1812,17 @@ pub mod tests { coding_shreds.len(), MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1 ); + let coding_shreds = Shredder::data_shreds_to_coding_shreds( + shredder.keypair.deref(), + &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1], + true, // is_last_in_slot + &mut stats, + ) + .unwrap(); + assert_eq!( + coding_shreds.len(), + 3 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1 + ); } #[test] diff --git a/ledger/src/staking_utils.rs b/ledger/src/staking_utils.rs index 29a9cfe23c..7b7c24bb96 100644 --- a/ledger/src/staking_utils.rs +++ b/ledger/src/staking_utils.rs @@ -68,7 +68,7 @@ pub(crate) mod tests { use rand::Rng; use solana_runtime::vote_account::{ArcVoteAccount, VoteAccounts}; use solana_sdk::{ - account::{from_account, Account}, + account::{from_account, AccountSharedData}, clock::Clock, instruction::Instruction, pubkey::Pubkey, @@ -212,7 +212,8 @@ pub(crate) mod tests { let mut result: Vec<_> = epoch_stakes_and_lockouts(&bank, next_leader_schedule_epoch); result.sort(); let stake_history = - 
from_account::(&bank.get_account(&stake_history::id()).unwrap()).unwrap(); + from_account::(&bank.get_account(&stake_history::id()).unwrap()) + .unwrap(); let mut expected = vec![ ( leader_stake.stake(bank.epoch(), Some(&stake_history), true), @@ -309,7 +310,7 @@ pub(crate) mod tests { )); let mut rng = rand::thread_rng(); let vote_accounts = stakes.into_iter().map(|(stake, vote_state)| { - let account = Account::new_data( + let account = AccountSharedData::new_data( rng.gen(), // lamports &VoteStateVersions::new_current(vote_state), &Pubkey::new_unique(), // owner diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index 80a720da12..8dc241de38 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -22,9 +22,7 @@ type IndexShredsMap = BTreeMap>; fn test_multi_fec_block_coding() { let keypair = Arc::new(Keypair::new()); let slot = 0x1234_5678_9abc_def0; - let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0) - .expect("Failed in creating shredder"); - + let shredder = Shredder::new(slot, slot - 5, keypair.clone(), 0, 0).unwrap(); let num_fec_sets = 100; let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize; let keypair0 = Keypair::new(); @@ -78,7 +76,6 @@ fn test_multi_fec_block_coding() { MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, shred_start_index, - shred_start_index, slot, ) .unwrap(); @@ -123,6 +120,7 @@ fn test_multi_fec_block_different_size_coding() { for (fec_data_shreds, fec_coding_shreds) in fec_data.values().zip(fec_coding.values()) { let first_data_index = fec_data_shreds.first().unwrap().index() as usize; let first_code_index = fec_coding_shreds.first().unwrap().index() as usize; + assert_eq!(first_data_index, first_code_index); let num_data = fec_data_shreds.len(); let num_coding = fec_coding_shreds.len(); let all_shreds: Vec = fec_data_shreds @@ -131,17 +129,9 @@ fn test_multi_fec_block_different_size_coding() { .chain(fec_coding_shreds.iter().step_by(2)) 
.cloned() .collect(); - - let recovered_data = Shredder::try_recovery( - all_shreds, - num_data, - num_coding, - first_data_index, - first_code_index, - slot, - ) - .unwrap(); - + let recovered_data = + Shredder::try_recovery(all_shreds, num_data, num_coding, first_data_index, slot) + .unwrap(); // Necessary in order to ensure the last shred in the slot // is part of the recovered set, and that the below `index` // calcuation in the loop is correct @@ -200,8 +190,7 @@ fn setup_different_sized_fec_blocks( parent_slot: Slot, keypair: Arc, ) -> (IndexShredsMap, IndexShredsMap, usize) { - let shredder = - Shredder::new(slot, parent_slot, 1.0, keypair, 0, 0).expect("Failed in creating shredder"); + let shredder = Shredder::new(slot, parent_slot, keypair, 0, 0).unwrap(); let keypair0 = Keypair::new(); let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 7e85b3dbf1..47e27edb96 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-local-cluster" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -17,26 +17,25 @@ fs_extra = "1.2.0" log = "0.4.11" rand = "0.7.0" rayon = "1.5.0" -solana-config-program = { path = "../programs/config", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-download-utils = { path = "../download-utils", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-exchange-program = { path = "../programs/exchange", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = 
"=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-vest-program = { path = "../programs/vest", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-config-program = { path = "../programs/config", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-download-utils = { path = "../download-utils", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-exchange-program = { path = "../programs/exchange", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-vest-program = { path = "../programs/vest", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } tempfile = "3.1.0" -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.5.19" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.14" } [dev-dependencies] assert_matches = "1.3.0" serial_test = "0.4.0" -serial_test_derive = "0.4.0" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index ef0f96ac92..29ea476033 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -6,6 +6,7 @@ use log::*; use rand::{thread_rng, Rng}; use rayon::prelude::*; use solana_client::thin_client::create_client; +use solana_core::validator::ValidatorExit; use solana_core::{ 
cluster_info::VALIDATOR_PORT_RANGE, consensus::VOTE_THRESHOLD_DEPTH, contact_info::ContactInfo, gossip_service::discover_cluster, @@ -16,9 +17,7 @@ use solana_ledger::{ }; use solana_sdk::{ client::SyncClient, - clock::{ - self, Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS, - }, + clock::{self, Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, commitment_config::CommitmentConfig, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, hash::Hash, @@ -32,13 +31,11 @@ use solana_sdk::{ use std::{ collections::{HashMap, HashSet}, path::Path, - sync::Arc, + sync::{Arc, RwLock}, thread::sleep, time::{Duration, Instant}, }; -const DEFAULT_SLOT_MILLIS: u64 = (DEFAULT_TICKS_PER_SLOT * 1000) / DEFAULT_TICKS_PER_SECOND; - /// Spend and verify from every node in the network pub fn spend_and_verify_all_nodes( entry_point_info: &ContactInfo, @@ -133,20 +130,6 @@ pub fn send_many_transactions( expected_balances } -pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) { - let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap(); - assert!(cluster_nodes.len() >= nodes); - for node in &cluster_nodes { - let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); - assert!(client.validator_exit().unwrap()); - } - sleep(Duration::from_millis(DEFAULT_SLOT_MILLIS)); - for node in &cluster_nodes { - let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE); - assert!(client.validator_exit().is_err()); - } -} - pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) { let ledger = Blockstore::open(ledger_path).unwrap(); let zeroth_slot = ledger.get_slot_entries(0, 0).unwrap(); @@ -195,11 +178,12 @@ pub fn sleep_n_epochs( pub fn kill_entry_and_spend_and_verify_rest( entry_point_info: &ContactInfo, + entry_point_validator_exit: &Arc>, funding_keypair: &Keypair, nodes: usize, slot_millis: u64, ) { - solana_logger::setup(); + info!("kill_entry_and_spend_and_verify_rest..."); let cluster_nodes = 
discover_cluster(&entry_point_info.gossip, nodes).unwrap(); assert!(cluster_nodes.len() >= nodes); let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE); @@ -218,7 +202,7 @@ pub fn kill_entry_and_spend_and_verify_rest( )); info!("done sleeping for first 2 warmup epochs"); info!("killing entry point: {}", entry_point_info.id); - assert!(client.validator_exit().unwrap()); + entry_point_validator_exit.write().unwrap().exit(); info!("sleeping for some time"); sleep(Duration::from_millis( slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS, diff --git a/local-cluster/src/lib.rs b/local-cluster/src/lib.rs index 133b4fc168..92cf3f72d6 100644 --- a/local-cluster/src/lib.rs +++ b/local-cluster/src/lib.rs @@ -2,3 +2,4 @@ pub mod cluster; pub mod cluster_tests; pub mod local_cluster; +pub mod validator_configs; diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 4716116ca9..fa4c18b12f 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -1,6 +1,7 @@ use crate::{ cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo}, cluster_tests, + validator_configs::*, }; use itertools::izip; use log::*; @@ -9,7 +10,7 @@ use solana_core::{ cluster_info::{Node, VALIDATOR_PORT_RANGE}, contact_info::ContactInfo, gossip_service::discover_cluster, - validator::{Validator, ValidatorConfig}, + validator::{Validator, ValidatorConfig, ValidatorStartProgress}, }; use solana_ledger::create_new_tmp_ledger; use solana_runtime::genesis_utils::{ @@ -18,6 +19,7 @@ use solana_runtime::genesis_utils::{ }; use solana_sdk::{ account::Account, + account::AccountSharedData, client::SyncClient, clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, commitment_config::CommitmentConfig, @@ -42,10 +44,10 @@ use std::{ collections::HashMap, io::{Error, ErrorKind, Result}, iter, - sync::Arc, + sync::{Arc, RwLock}, }; -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct ClusterConfig { /// The validator 
config that should be applied to every node in the cluster pub validator_configs: Vec, @@ -68,7 +70,7 @@ pub struct ClusterConfig { pub native_instruction_processors: Vec<(String, Pubkey)>, pub cluster_type: ClusterType, pub poh_config: PohConfig, - pub additional_accounts: Vec<(Pubkey, Account)>, + pub additional_accounts: Vec<(Pubkey, AccountSharedData)>, } impl Default for ClusterConfig { @@ -110,7 +112,10 @@ impl LocalCluster { let mut config = ClusterConfig { node_stakes: stakes, cluster_lamports, - validator_configs: vec![ValidatorConfig::default(); num_nodes], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + num_nodes, + ), ..ClusterConfig::default() }; Self::new(&mut config) @@ -165,9 +170,12 @@ impl LocalCluster { stakes_in_genesis, config.cluster_type, ); - genesis_config - .accounts - .extend(config.additional_accounts.drain(..)); + genesis_config.accounts.extend( + config + .additional_accounts + .drain(..) + .map(|(key, account)| (key, Account::from(account))), + ); genesis_config.ticks_per_slot = config.ticks_per_slot; genesis_config.epoch_schedule = EpochSchedule::custom( config.slots_per_epoch, @@ -193,21 +201,23 @@ impl LocalCluster { let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config); let leader_contact_info = leader_node.info.clone(); - let mut leader_config = config.validator_configs[0].clone(); + let mut leader_config = safe_clone_config(&config.validator_configs[0]); leader_config.rpc_addrs = Some((leader_node.info.rpc, leader_node.info.rpc_pubsub)); leader_config.account_paths = vec![leader_ledger_path.join("accounts")]; let leader_keypair = Arc::new(Keypair::from_bytes(&leader_keypair.to_bytes()).unwrap()); let leader_vote_keypair = Arc::new(Keypair::from_bytes(&leader_vote_keypair.to_bytes()).unwrap()); + let leader_server = Validator::new( leader_node, &leader_keypair, &leader_ledger_path, &leader_vote_keypair.pubkey(), - vec![leader_vote_keypair.clone()], + 
Arc::new(RwLock::new(vec![leader_vote_keypair.clone()])), vec![], &leader_config, true, // should_check_duplicate_instance + Arc::new(RwLock::new(ValidatorStartProgress::default())), ); let mut validators = HashMap::new(); @@ -217,10 +227,9 @@ impl LocalCluster { ledger_path: leader_ledger_path, contact_info: leader_contact_info.clone(), }; - let cluster_leader = ClusterValidatorInfo::new( leader_info, - config.validator_configs[0].clone(), + safe_clone_config(&config.validator_configs[0]), leader_server, ); @@ -255,10 +264,8 @@ impl LocalCluster { ); } - let listener_config = ValidatorConfig { - voting_disabled: true, - ..config.validator_configs[0].clone() - }; + let mut listener_config = safe_clone_config(&config.validator_configs[0]); + listener_config.voting_disabled = true; (0..config.num_listeners).for_each(|_| { cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None); }); @@ -339,7 +346,7 @@ impl LocalCluster { } } - let mut config = validator_config.clone(); + let mut config = safe_clone_config(validator_config); config.rpc_addrs = Some((validator_node.info.rpc, validator_node.info.rpc_pubsub)); config.account_paths = vec![ledger_path.join("accounts")]; let voting_keypair = voting_keypair.unwrap(); @@ -348,10 +355,11 @@ impl LocalCluster { &validator_keypair, &ledger_path, &voting_keypair.pubkey(), - vec![voting_keypair.clone()], + Arc::new(RwLock::new(vec![voting_keypair.clone()])), vec![self.entry_point_info.clone()], &config, true, // should_check_duplicate_instance + Arc::new(RwLock::new(ValidatorStartProgress::default())), ); let validator_pubkey = validator_keypair.pubkey(); @@ -362,7 +370,7 @@ impl LocalCluster { ledger_path, contact_info, }, - validator_config.clone(), + safe_clone_config(validator_config), validator_server, ); @@ -662,12 +670,13 @@ impl Cluster for LocalCluster { &validator_info.keypair, &validator_info.ledger_path, &validator_info.voting_keypair.pubkey(), - vec![validator_info.voting_keypair.clone()], + 
Arc::new(RwLock::new(vec![validator_info.voting_keypair.clone()])), entry_point_info .map(|entry_point_info| vec![entry_point_info]) .unwrap_or_default(), - &cluster_validator_info.config, + &safe_clone_config(&cluster_validator_info.config), true, // should_check_duplicate_instance + Arc::new(RwLock::new(ValidatorStartProgress::default())), ); cluster_validator_info.validator = Some(restarted_node); cluster_validator_info @@ -706,11 +715,12 @@ mod test { #[test] fn test_local_cluster_start_and_exit_with_config() { solana_logger::setup(); - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; const NUM_NODES: usize = 1; let mut config = ClusterConfig { - validator_configs: vec![ValidatorConfig::default(); NUM_NODES], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + NUM_NODES, + ), node_stakes: vec![3; NUM_NODES], cluster_lamports: 100, ticks_per_slot: 8, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 61bb05c991..782e4d923f 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -37,6 +37,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { poh_verify: config.poh_verify, cuda: config.cuda, require_tower: config.require_tower, + tower_path: config.tower_path.clone(), debug_keys: config.debug_keys.clone(), contact_debug_interval: config.contact_debug_interval, contact_save_interval: config.contact_save_interval, @@ -53,6 +54,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { tpu_coalesce_ms: config.tpu_coalesce_ms, validator_exit: Arc::new(RwLock::new(ValidatorExit::default())), poh_hashes_per_batch: config.poh_hashes_per_batch, + no_wait_for_vote_to_start_leader: config.no_wait_for_vote_to_start_leader, } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 7691166460..94f00b1a8d 
100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -3,7 +3,7 @@ use assert_matches::assert_matches; use crossbeam_channel::{unbounded, Receiver}; use gag::BufferRedirect; use log::*; -use serial_test_derive::serial; +use serial_test::serial; use solana_client::{ pubsub_client::PubsubClient, rpc_client::RpcClient, @@ -28,18 +28,19 @@ use solana_ledger::{ leader_schedule::LeaderSchedule, }; use solana_local_cluster::{ - cluster::Cluster, + cluster::{Cluster, ClusterValidatorInfo}, cluster_tests, local_cluster::{ClusterConfig, LocalCluster}, + validator_configs::*, }; use solana_runtime::{ bank_forks::{ArchiveFormat, SnapshotConfig}, snapshot_utils, }; use solana_sdk::{ - account::Account, + account::AccountSharedData, client::{AsyncClient, SyncClient}, - clock::{self, Slot}, + clock::{self, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES}, commitment_config::CommitmentConfig, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, genesis_config::ClusterType, @@ -49,10 +50,9 @@ use solana_sdk::{ signature::{Keypair, Signer}, system_program, system_transaction, }; -use solana_stake_program::stake_state::MIN_DELEGATE_STAKE_AMOUNT; use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY; use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap, HashSet}, fs, io::Read, iter, @@ -69,7 +69,6 @@ const RUST_LOG_FILTER: &str = #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_ledger_cleanup_service() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_ledger_cleanup_service"); @@ -82,7 +81,7 @@ fn test_ledger_cleanup_service() { cluster_lamports: 10_000, poh_config: PohConfig::new_sleep(Duration::from_millis(50)), node_stakes: vec![100; num_nodes], - validator_configs: vec![validator_config; num_nodes], + validator_configs: make_identical_validator_configs(&validator_config, num_nodes), ..ClusterConfig::default() }; let mut cluster = 
LocalCluster::new(&mut config); @@ -126,7 +125,6 @@ fn test_spend_and_verify_all_nodes_1() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_spend_and_verify_all_nodes_2() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_spend_and_verify_all_nodes_2"); @@ -142,7 +140,6 @@ fn test_spend_and_verify_all_nodes_2() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_spend_and_verify_all_nodes_3() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_spend_and_verify_all_nodes_3"); @@ -158,7 +155,6 @@ fn test_spend_and_verify_all_nodes_3() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_local_cluster_signature_subscribe() { solana_logger::setup_with_default(RUST_LOG_FILTER); let num_nodes = 2; @@ -248,57 +244,30 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() { ); } -#[allow(unused_attributes)] -#[test] -#[should_panic] -fn test_validator_exit_default_config_should_panic() { - solana_logger::setup(); - error!("test_validator_exit_default_config_should_panic"); - let num_nodes = 2; - let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100); - cluster_tests::validator_exit(&local.entry_point_info, num_nodes); -} - -#[test] -#[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] -fn test_validator_exit_2() { - solana_logger::setup(); - error!("test_validator_exit_2"); - let num_nodes = 2; - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; - validator_config.wait_for_supermajority = Some(0); - - let mut config = ClusterConfig { - cluster_lamports: 10_000, - node_stakes: vec![100; num_nodes], - validator_configs: vec![validator_config; num_nodes], - ..ClusterConfig::default() - }; - let local = LocalCluster::new(&mut config); - cluster_tests::validator_exit(&local.entry_point_info, num_nodes); -} - // Cluster needs a 
supermajority to remain, so the minimum size for this test is 4 #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_leader_failure_4() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_leader_failure_4"); let num_nodes = 4; - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; + let validator_config = ValidatorConfig::default(); let mut config = ClusterConfig { - cluster_lamports: 1000 * MIN_DELEGATE_STAKE_AMOUNT, - node_stakes: vec![MIN_DELEGATE_STAKE_AMOUNT + 400; 4], - validator_configs: vec![validator_config; num_nodes], + cluster_lamports: 10_000, + node_stakes: vec![100; 4], + validator_configs: make_identical_validator_configs(&validator_config, num_nodes), ..ClusterConfig::default() }; let local = LocalCluster::new(&mut config); + cluster_tests::kill_entry_and_spend_and_verify_rest( &local.entry_point_info, + &local + .validators + .get(&local.entry_point_info.id) + .unwrap() + .config + .validator_exit, &local.funding_keypair, num_nodes, config.ticks_per_slot * config.poh_config.target_tick_duration.as_millis() as u64, @@ -315,16 +284,17 @@ fn test_leader_failure_4() { /// * `leader_schedule` - An option that specifies whether the cluster should /// run with a fixed, predetermined leader schedule #[allow(clippy::cognitive_complexity)] -fn run_cluster_partition( - partitions: &[&[usize]], +fn run_cluster_partition( + partitions: &[Vec], leader_schedule: Option<(LeaderSchedule, Vec>)>, - on_partition_start: E, - on_partition_resolved: F, - additional_accounts: Vec<(Pubkey, Account)>, -) where - E: FnOnce(&mut LocalCluster), - F: FnOnce(&mut LocalCluster), -{ + mut context: C, + on_partition_start: impl FnOnce(&mut LocalCluster, &mut C), + on_before_partition_resolved: impl FnOnce(&mut LocalCluster, &mut C), + on_partition_resolved: impl FnOnce(&mut LocalCluster, &mut C), + partition_duration: Option, + ticks_per_slot: Option, + 
additional_accounts: Vec<(Pubkey, AccountSharedData)>, +) { solana_logger::setup_with_default(RUST_LOG_FILTER); info!("PARTITION_TEST!"); let num_nodes = partitions.len(); @@ -371,7 +341,7 @@ fn run_cluster_partition( let mut config = ClusterConfig { cluster_lamports, node_stakes, - validator_configs: vec![validator_config; num_nodes], + validator_configs: make_identical_validator_configs(&validator_config, num_nodes), validator_keys: Some( validator_keys .into_iter() @@ -382,6 +352,7 @@ fn run_cluster_partition( stakers_slot_offset: slots_per_epoch, skip_warmup_slots: true, additional_accounts, + ticks_per_slot: ticks_per_slot.unwrap_or(DEFAULT_TICKS_PER_SLOT), ..ClusterConfig::default() }; @@ -410,11 +381,14 @@ fn run_cluster_partition( } info!("PARTITION_TEST start partition"); + on_partition_start(&mut cluster, &mut context); enable_partition.store(false, Ordering::Relaxed); - on_partition_start(&mut cluster); - sleep(Duration::from_millis(leader_schedule_time)); + sleep(Duration::from_millis( + partition_duration.unwrap_or(leader_schedule_time), + )); + on_before_partition_resolved(&mut cluster, &mut context); info!("PARTITION_TEST remove partition"); enable_partition.store(true, Ordering::Relaxed); @@ -433,7 +407,7 @@ fn run_cluster_partition( ); sleep(Duration::from_millis(propagation_time)); info!("PARTITION_TEST resuming normal operation"); - on_partition_resolved(&mut cluster); + on_partition_resolved(&mut cluster, &mut context); } #[allow(unused_attributes)] @@ -441,58 +415,72 @@ fn run_cluster_partition( #[test] #[serial] fn test_cluster_partition_1_2() { - let empty = |_: &mut LocalCluster| {}; - let on_partition_resolved = |cluster: &mut LocalCluster| { + let empty = |_: &mut LocalCluster, _: &mut ()| {}; + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_for_new_roots(16, &"PARTITION_TEST"); }; - run_cluster_partition(&[&[1], &[1, 1]], None, empty, on_partition_resolved, vec![]) + run_cluster_partition( + 
&[vec![1], vec![1, 1]], + None, + (), + empty, + empty, + on_partition_resolved, + None, + None, + vec![], + ) } #[test] #[serial] fn test_cluster_partition_1_1() { - let empty = |_: &mut LocalCluster| {}; - let on_partition_resolved = |cluster: &mut LocalCluster| { + let empty = |_: &mut LocalCluster, _: &mut ()| {}; + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_for_new_roots(16, &"PARTITION_TEST"); }; - run_cluster_partition(&[&[1], &[1]], None, empty, on_partition_resolved, vec![]) + run_cluster_partition( + &[vec![1], vec![1]], + None, + (), + empty, + empty, + on_partition_resolved, + None, + None, + vec![], + ) } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_cluster_partition_1_1_1() { - let empty = |_: &mut LocalCluster| {}; - let on_partition_resolved = |cluster: &mut LocalCluster| { + let empty = |_: &mut LocalCluster, _: &mut ()| {}; + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_for_new_roots(16, &"PARTITION_TEST"); }; run_cluster_partition( - &[&[1], &[1], &[1]], + &[vec![1], vec![1], vec![1]], None, + (), + empty, empty, on_partition_resolved, + None, + None, vec![], ) } fn create_custom_leader_schedule( - num_validators: usize, - num_slots_per_validator: usize, + validator_num_slots: &[usize], ) -> (LeaderSchedule, Vec>) { let mut leader_schedule = vec![]; let validator_keys: Vec<_> = iter::repeat_with(|| Arc::new(Keypair::new())) - .take(num_validators) + .take(validator_num_slots.len()) .collect(); - for (i, k) in validator_keys.iter().enumerate() { - let num_slots = { - if i == 0 { - // Set up the leader to have 50% of the slots - num_slots_per_validator * (num_validators - 1) - } else { - num_slots_per_validator - } - }; - for _ in 0..num_slots { + for (k, num_slots) in validator_keys.iter().zip(validator_num_slots.iter()) { + for _ in 0..*num_slots { leader_schedule.push(k.pubkey()) } } @@ -506,7 +494,6 @@ fn 
create_custom_leader_schedule( #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_kill_heaviest_partition() { // This test: // 1) Spins up four partitions, the heaviest being the first with more stake @@ -517,13 +504,17 @@ fn test_kill_heaviest_partition() { // eventually choose the major partition // 4) Check for recovery let num_slots_per_validator = 8; - let partitions: [&[usize]; 4] = [&[11], &[10], &[10], &[10]]; - let (leader_schedule, validator_keys) = - create_custom_leader_schedule(partitions.len(), num_slots_per_validator); - - let empty = |_: &mut LocalCluster| {}; + let partitions: [Vec; 4] = [vec![11], vec![10], vec![10], vec![10]]; + let (leader_schedule, validator_keys) = create_custom_leader_schedule(&[ + num_slots_per_validator * (partitions.len() - 1), + num_slots_per_validator, + num_slots_per_validator, + num_slots_per_validator, + ]); + + let empty = |_: &mut LocalCluster, _: &mut ()| {}; let validator_to_kill = validator_keys[0].pubkey(); - let on_partition_resolved = |cluster: &mut LocalCluster| { + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { info!("Killing validator with id: {}", validator_to_kill); cluster.exit_node(&validator_to_kill); cluster.check_for_new_roots(16, &"PARTITION_TEST"); @@ -531,59 +522,86 @@ fn test_kill_heaviest_partition() { run_cluster_partition( &partitions, Some((leader_schedule, validator_keys)), + (), + empty, empty, on_partition_resolved, + None, + None, vec![], ) } #[allow(clippy::assertions_on_constants)] -fn run_kill_partition_switch_threshold( - failures_stake: u64, - alive_stake_1: u64, - alive_stake_2: u64, - on_partition_resolved: F, -) where - F: Fn(&mut LocalCluster), -{ +fn run_kill_partition_switch_threshold( + stakes_to_kill: &[&[(usize, usize)]], + alive_stakes: &[&[(usize, usize)]], + partition_duration: Option, + ticks_per_slot: Option, + partition_context: C, + on_partition_start: impl Fn(&mut LocalCluster, &[Pubkey], &mut C), + 
on_before_partition_resolved: impl Fn(&mut LocalCluster, &mut C), + on_partition_resolved: impl Fn(&mut LocalCluster, &mut C), +) { // Needs to be at least 1/3 or there will be no overlap // with the confirmation supermajority 2/3 assert!(SWITCH_FORK_THRESHOLD >= 1f64 / 3f64); info!( - "stakes: {} {} {}", - failures_stake, alive_stake_1, alive_stake_2 + "stakes_to_kill: {:?}, alive_stakes: {:?}", + stakes_to_kill, alive_stakes ); // This test: // 1) Spins up three partitions // 2) Kills the first partition with the stake `failures_stake` // 5) runs `on_partition_resolved` - let num_slots_per_validator = 8; - let partitions: [&[usize]; 3] = [ - &[(failures_stake as usize)], - &[(alive_stake_1 as usize)], - &[(alive_stake_2 as usize)], - ]; - let (leader_schedule, validator_keys) = - create_custom_leader_schedule(partitions.len(), num_slots_per_validator); + let partitions: Vec<&[(usize, usize)]> = stakes_to_kill + .iter() + .cloned() + .chain(alive_stakes.iter().cloned()) + .collect(); - let validator_to_kill = validator_keys[0].pubkey(); - let on_partition_start = |cluster: &mut LocalCluster| { - info!("Killing validator with id: {}", validator_to_kill); - cluster.exit_node(&validator_to_kill); + let stake_partitions: Vec> = partitions + .iter() + .map(|stakes_and_slots| stakes_and_slots.iter().map(|(stake, _)| *stake).collect()) + .collect(); + let num_slots_per_validator: Vec = partitions + .iter() + .flat_map(|stakes_and_slots| stakes_and_slots.iter().map(|(_, num_slots)| *num_slots)) + .collect(); + + let (leader_schedule, validator_keys) = create_custom_leader_schedule(&num_slots_per_validator); + + info!( + "Validator ids: {:?}", + validator_keys + .iter() + .map(|k| k.pubkey()) + .collect::>() + ); + let validator_pubkeys: Vec = validator_keys.iter().map(|k| k.pubkey()).collect(); + let on_partition_start = |cluster: &mut LocalCluster, partition_context: &mut C| { + for validator_to_kill in &validator_pubkeys[0..stakes_to_kill.len()] { + info!("Killing 
validator with id: {}", validator_to_kill); + cluster.exit_node(&validator_to_kill); + } + on_partition_start(cluster, &validator_pubkeys, partition_context); }; run_cluster_partition( - &partitions, + &stake_partitions, Some((leader_schedule, validator_keys)), + partition_context, on_partition_start, + on_before_partition_resolved, on_partition_resolved, + partition_duration, + ticks_per_slot, vec![], ) } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_kill_partition_switch_threshold_no_progress() { let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD; let total_stake = 10_000; @@ -597,22 +615,30 @@ fn test_kill_partition_switch_threshold_no_progress() { // Check that no new roots were set 400 slots after partition resolves (gives time // for lockouts built during partition to resolve and gives validators an opportunity // to try and switch forks) - let on_partition_resolved = |cluster: &mut LocalCluster| { + let on_partition_start = |_: &mut LocalCluster, _: &[Pubkey], _: &mut ()| {}; + let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_no_new_roots(400, &"PARTITION_TEST"); }; // This kills `max_failures_stake`, so no progress should be made run_kill_partition_switch_threshold( - failures_stake, - alive_stake_1, - alive_stake_2, + &[&[(failures_stake as usize, 16)]], + &[ + &[(alive_stake_1 as usize, 8)], + &[(alive_stake_2 as usize, 8)], + ], + None, + None, + (), + on_partition_start, + on_before_partition_resolved, on_partition_resolved, ); } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_kill_partition_switch_threshold_progress() { let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD; let total_stake = 10_000; @@ -641,33 +667,269 @@ fn test_kill_partition_switch_threshold_progress() { && smaller as f64 / total_stake as f64 <= 
SWITCH_FORK_THRESHOLD ); - let on_partition_resolved = |cluster: &mut LocalCluster| { + let on_partition_start = |_: &mut LocalCluster, _: &[Pubkey], _: &mut ()| {}; + let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_for_new_roots(16, &"PARTITION_TEST"); }; run_kill_partition_switch_threshold( - failures_stake, - alive_stake_1, - alive_stake_2, + &[&[(failures_stake as usize, 16)]], + &[ + &[(alive_stake_1 as usize, 8)], + &[(alive_stake_2 as usize, 8)], + ], + None, + None, + (), + on_partition_start, + on_before_partition_resolved, + on_partition_resolved, + ); +} + +#[test] +#[serial] +// Steps in this test: +// We want to create a situation like: +/* + 1 (2%, killed and restarted) --- 200 (37%, lighter fork) + / + 0 + \-------- 4 (38%, heavier fork) +*/ +// where the 2% that voted on slot 1 don't see their votes land in a block +// and thus without integrating votes from gossip into fork choice, will +// deem slot 4 the heavier fork and try to switch to slot 4, which doesn't pass the +// switch threshold. This stalls the network. + +// We do this by: +// 1) Creating a partition so all three nodes don't see each other +// 2) Kill the validator with 2% +// 3) Wait for longer than blockhash expiration +// 4) Copy in the lighter fork's blocks up, *only* up to the first slot in the lighter fork +// (not all the blocks on the lighter fork!), call this slot `L` +// 5) Restart the validator with 2% so that he votes on `L`, but the vote doesn't land +// due to blockhash expiration +// 6) Resolve the partition so that the 2% repairs the other fork, and tries to switch, +// stalling the network. 
+ +fn test_fork_choice_refresh_old_votes() { + solana_logger::setup_with_default(RUST_LOG_FILTER); + let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD; + let total_stake = 100; + let max_failures_stake = (max_switch_threshold_failure_pct * total_stake as f64) as u64; + + // 1% less than the failure stake, where the 2% is allocated to a validator that + // has no leader slots and thus won't be able to vote on its own fork. + let failures_stake = max_failures_stake; + let total_alive_stake = total_stake - failures_stake; + let alive_stake_1 = total_alive_stake / 2 - 1; + let alive_stake_2 = total_alive_stake - alive_stake_1 - 1; + + // Heavier fork still doesn't have enough stake to switch. Both branches need + // the vote to land from the validator with `alive_stake_3` to allow the other + // fork to switch. + let alive_stake_3 = 2; + assert!(alive_stake_1 < alive_stake_2); + assert!(alive_stake_1 + alive_stake_3 > alive_stake_2); + + let partitions: &[&[(usize, usize)]] = &[ + &[(alive_stake_1 as usize, 8)], + &[(alive_stake_2 as usize, 8)], + &[(alive_stake_3 as usize, 0)], + ]; + + #[derive(Default)] + struct PartitionContext { + alive_stake3_info: Option, + smallest_validator_key: Pubkey, + lighter_fork_validator_key: Pubkey, + heaviest_validator_key: Pubkey, + } + let on_partition_start = + |cluster: &mut LocalCluster, validator_keys: &[Pubkey], context: &mut PartitionContext| { + // Kill validator with alive_stake_3, second in `partitions` slice + let smallest_validator_key = &validator_keys[3]; + let info = cluster.exit_node(smallest_validator_key); + context.alive_stake3_info = Some(info); + context.smallest_validator_key = *smallest_validator_key; + context.lighter_fork_validator_key = validator_keys[1]; + // Third in `partitions` slice + context.heaviest_validator_key = validator_keys[2]; + }; + + let ticks_per_slot = 8; + let on_before_partition_resolved = + |cluster: &mut LocalCluster, context: &mut PartitionContext| { + // Equal 
to ms_per_slot * MAX_RECENT_BLOCKHASHES, rounded up + let sleep_time_ms = + ((ticks_per_slot * DEFAULT_MS_PER_SLOT * MAX_RECENT_BLOCKHASHES as u64) + + DEFAULT_TICKS_PER_SLOT + - 1) + / DEFAULT_TICKS_PER_SLOT; + info!("Wait for blockhashes to expire, {} ms", sleep_time_ms); + + // Wait for blockhashes to expire + sleep(Duration::from_millis(sleep_time_ms)); + + let smallest_ledger_path = context + .alive_stake3_info + .as_ref() + .unwrap() + .info + .ledger_path + .clone(); + let lighter_fork_ledger_path = cluster.ledger_path(&context.lighter_fork_validator_key); + let heaviest_ledger_path = cluster.ledger_path(&context.heaviest_validator_key); + + // Open ledgers + let smallest_blockstore = open_blockstore(&smallest_ledger_path); + let lighter_fork_blockstore = open_blockstore(&lighter_fork_ledger_path); + let heaviest_blockstore = open_blockstore(&heaviest_ledger_path); + + info!("Opened blockstores"); + + // Get latest votes + let lighter_fork_latest_vote = last_vote_in_tower( + &lighter_fork_ledger_path, + &context.lighter_fork_validator_key, + ) + .unwrap(); + let heaviest_fork_latest_vote = + last_vote_in_tower(&heaviest_ledger_path, &context.heaviest_validator_key).unwrap(); + + // Find the first slot on the smaller fork + let lighter_ancestors: BTreeSet = std::iter::once(lighter_fork_latest_vote) + .chain(AncestorIterator::new( + lighter_fork_latest_vote, + &lighter_fork_blockstore, + )) + .collect(); + let heavier_ancestors: BTreeSet = std::iter::once(heaviest_fork_latest_vote) + .chain(AncestorIterator::new( + heaviest_fork_latest_vote, + &heaviest_blockstore, + )) + .collect(); + let first_slot_in_lighter_partition = *lighter_ancestors + .iter() + .zip(heavier_ancestors.iter()) + .find(|(x, y)| x != y) + .unwrap() + .0; + + // Must have been updated in the above loop + assert!(first_slot_in_lighter_partition != 0); + info!( + "First slot in lighter partition is {}", + first_slot_in_lighter_partition + ); + + assert!(first_slot_in_lighter_partition != 0); 
+ + // Copy all the blocks from the smaller partition up to `first_slot_in_lighter_partition` + // into the smallest validator's blockstore + for lighter_slot in std::iter::once(first_slot_in_lighter_partition).chain( + AncestorIterator::new(first_slot_in_lighter_partition, &lighter_fork_blockstore), + ) { + let lighter_slot_meta = + lighter_fork_blockstore.meta(lighter_slot).unwrap().unwrap(); + assert!(lighter_slot_meta.is_full()); + // Get the shreds from the leader of the smaller fork + let lighter_fork_data_shreds = lighter_fork_blockstore + .get_data_shreds_for_slot(lighter_slot, 0) + .unwrap(); + + // Insert those shreds into the smallest validator's blockstore + smallest_blockstore + .insert_shreds(lighter_fork_data_shreds, None, false) + .unwrap(); + + // Check insert succeeded + let new_meta = smallest_blockstore.meta(lighter_slot).unwrap().unwrap(); + assert!(new_meta.is_full()); + assert_eq!(new_meta.last_index, lighter_slot_meta.last_index); + } + + // Restart the smallest validator that we killed earlier in `on_partition_start()` + drop(smallest_blockstore); + cluster.restart_node( + &context.smallest_validator_key, + context.alive_stake3_info.take().unwrap(), + ); + + loop { + // Wait for node to vote on the first slot on the less heavy fork, so it'll need + // a switch proof to flip to the other fork. + // However, this vote won't land because it's using an expired blockhash. 
The + // fork structure will look something like this after the vote: + /* + 1 (2%, killed and restarted) --- 200 (37%, lighter fork) + / + 0 + \-------- 4 (38%, heavier fork) + */ + if let Some(last_vote) = + last_vote_in_tower(&smallest_ledger_path, &context.smallest_validator_key) + { + // Check that the heaviest validator on the other fork doesn't have this slot, + // this must mean we voted on a unique slot on this fork + if last_vote == first_slot_in_lighter_partition { + info!( + "Saw vote on first slot in lighter partition {}", + first_slot_in_lighter_partition + ); + break; + } else { + info!( + "Haven't seen vote on first slot in lighter partition, latest vote is: {}", + last_vote + ); + } + } + + sleep(Duration::from_millis(20)); + } + + // Now resolve partition, allow validator to see the fork with the heavier validator, + // but the fork it's currently on is the heaviest, if only its own vote landed! + }; + + // Check that new roots were set after the partition resolves (gives time + // for lockouts built during partition to resolve and gives validators an opportunity + // to try and switch forks) + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| { + cluster.check_for_new_roots(16, &"PARTITION_TEST"); + }; + + run_kill_partition_switch_threshold( + &[&[(failures_stake as usize - 1, 16)]], + partitions, + // Partition long enough such that the first vote made by validator with + // `alive_stake_3` won't be ingested due to BlockhashTooOld, + None, + Some(ticks_per_slot), + PartitionContext::default(), + on_partition_start, + on_before_partition_resolved, on_partition_resolved, ); } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_two_unbalanced_stakes() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_two_unbalanced_stakes"); - let mut validator_config = ValidatorConfig::default(); + let validator_config = ValidatorConfig::default(); let num_ticks_per_second = 100; 
let num_ticks_per_slot = 10; let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64; - validator_config.rpc_config.enable_validator_exit = true; let mut cluster = LocalCluster::new(&mut ClusterConfig { node_stakes: vec![999_990, 3], cluster_lamports: 1_000_000, - validator_configs: vec![validator_config; 2], + validator_configs: make_identical_validator_configs(&validator_config, 2), ticks_per_slot: num_ticks_per_slot, slots_per_epoch: num_slots_per_epoch, stakers_slot_offset: num_slots_per_epoch, @@ -689,14 +951,13 @@ fn test_two_unbalanced_stakes() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_forwarding() { // Set up a cluster where one node is never the leader, so all txs sent to this node // will be have to be forwarded in order to be confirmed let mut config = ClusterConfig { node_stakes: vec![999_990, 3], cluster_lamports: 2_000_000, - validator_configs: vec![ValidatorConfig::default(); 2], + validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 2), ..ClusterConfig::default() }; let cluster = LocalCluster::new(&mut config); @@ -726,7 +987,7 @@ fn test_restart_node() { let mut cluster = LocalCluster::new(&mut ClusterConfig { node_stakes: vec![100; 1], cluster_lamports: 100, - validator_configs: vec![validator_config.clone()], + validator_configs: vec![safe_clone_config(&validator_config)], ticks_per_slot, slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -756,13 +1017,12 @@ fn test_restart_node() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_listener_startup() { let mut config = ClusterConfig { node_stakes: vec![100; 1], cluster_lamports: 1_000, num_listeners: 3, - validator_configs: vec![ValidatorConfig::default(); 1], + validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1), ..ClusterConfig::default() }; let cluster = LocalCluster::new(&mut config); @@ -772,7 +1032,6 @@ fn test_listener_startup() { #[test] 
#[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_mainnet_beta_cluster_type() { solana_logger::setup_with_default(RUST_LOG_FILTER); @@ -780,7 +1039,7 @@ fn test_mainnet_beta_cluster_type() { cluster_type: ClusterType::MainnetBeta, node_stakes: vec![100; 1], cluster_lamports: 1_000, - validator_configs: vec![ValidatorConfig::default(); 1], + validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1), ..ClusterConfig::default() }; let cluster = LocalCluster::new(&mut config); @@ -889,14 +1148,11 @@ fn test_frozen_account_from_genesis() { validator_keys: Some(vec![(validator_identity.clone(), true)]), node_stakes: vec![100; 1], cluster_lamports: 1_000, - validator_configs: vec![ - ValidatorConfig { - // Freeze the validator identity account - frozen_accounts: vec![validator_identity.pubkey()], - ..ValidatorConfig::default() - }; - 1 - ], + validator_configs: vec![ValidatorConfig { + // Freeze the validator identity account + frozen_accounts: vec![validator_identity.pubkey()], + ..ValidatorConfig::default() + }], ..ClusterConfig::default() }; generate_frozen_account_panic(LocalCluster::new(&mut config), validator_identity); @@ -904,7 +1160,6 @@ fn test_frozen_account_from_genesis() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_frozen_account_from_snapshot() { solana_logger::setup_with_default(RUST_LOG_FILTER); let validator_identity = @@ -918,7 +1173,10 @@ fn test_frozen_account_from_snapshot() { validator_keys: Some(vec![(validator_identity.clone(), true)]), node_stakes: vec![100; 1], cluster_lamports: 1_000, - validator_configs: vec![snapshot_test_config.validator_config.clone()], + validator_configs: make_identical_validator_configs( + &snapshot_test_config.validator_config, + 1, + ), ..ClusterConfig::default() }; let mut cluster = LocalCluster::new(&mut config); @@ -945,7 +1203,6 @@ fn test_frozen_account_from_snapshot() { #[test] #[serial] -#[cfg_attr(not(feature = 
"multiple-db-in-thread"), ignore)] fn test_consistency_halt() { solana_logger::setup_with_default(RUST_LOG_FILTER); let snapshot_interval_slots = 20; @@ -958,10 +1215,10 @@ fn test_consistency_halt() { .validator_config .accounts_hash_fault_injection_slots = 40; - let validator_stake = MIN_DELEGATE_STAKE_AMOUNT + 100; + let validator_stake = 10_000; let mut config = ClusterConfig { node_stakes: vec![validator_stake], - cluster_lamports: validator_stake * 10, + cluster_lamports: 100_000, validator_configs: vec![leader_snapshot_test_config.validator_config], ..ClusterConfig::default() }; @@ -1035,7 +1292,6 @@ fn test_consistency_halt() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_snapshot_download() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 1 node @@ -1051,7 +1307,10 @@ fn test_snapshot_download() { let mut config = ClusterConfig { node_stakes: vec![stake], cluster_lamports: 1_000_000, - validator_configs: vec![leader_snapshot_test_config.validator_config.clone()], + validator_configs: make_identical_validator_configs( + &leader_snapshot_test_config.validator_config, + 1, + ), ..ClusterConfig::default() }; @@ -1099,7 +1358,6 @@ fn test_snapshot_download() { #[allow(unused_attributes)] #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_snapshot_restart_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ -1115,8 +1373,8 @@ fn test_snapshot_restart_tower() { node_stakes: vec![10000, 10], cluster_lamports: 100_000, validator_configs: vec![ - leader_snapshot_test_config.validator_config.clone(), - validator_snapshot_test_config.validator_config.clone(), + safe_clone_config(&leader_snapshot_test_config.validator_config), + safe_clone_config(&validator_snapshot_test_config.validator_config), ], ..ClusterConfig::default() }; @@ -1172,7 +1430,6 @@ fn test_snapshot_restart_tower() { #[test] #[serial] 
-#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_snapshots_blockstore_floor() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 1 snapshotting leader @@ -1194,7 +1451,10 @@ fn test_snapshots_blockstore_floor() { let mut config = ClusterConfig { node_stakes: vec![10000], cluster_lamports: 100_000, - validator_configs: vec![leader_snapshot_test_config.validator_config.clone()], + validator_configs: make_identical_validator_configs( + &leader_snapshot_test_config.validator_config, + 1, + ), ..ClusterConfig::default() }; @@ -1273,7 +1533,6 @@ fn test_snapshots_blockstore_floor() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_snapshots_restart_validity() { solana_logger::setup_with_default(RUST_LOG_FILTER); let snapshot_interval_slots = 10; @@ -1298,7 +1557,10 @@ fn test_snapshots_restart_validity() { let mut config = ClusterConfig { node_stakes: vec![10000], cluster_lamports: 100_000, - validator_configs: vec![snapshot_test_config.validator_config.clone()], + validator_configs: make_identical_validator_configs( + &snapshot_test_config.validator_config, + 1, + ), ..ClusterConfig::default() }; @@ -1332,7 +1594,10 @@ fn test_snapshots_restart_validity() { // Restart node trace!("Restarting cluster from snapshot"); let nodes = cluster.get_node_pubkeys(); - cluster.exit_restart_node(&nodes[0], snapshot_test_config.validator_config.clone()); + cluster.exit_restart_node( + &nodes[0], + safe_clone_config(&snapshot_test_config.validator_config), + ); // Verify account balances on validator trace!("Verifying balances"); @@ -1394,15 +1659,13 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) { } #[test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_wait_for_max_stake() { solana_logger::setup_with_default(RUST_LOG_FILTER); - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; + let 
validator_config = ValidatorConfig::default(); let mut config = ClusterConfig { cluster_lamports: 10_000, node_stakes: vec![100; 4], - validator_configs: vec![validator_config; 4], + validator_configs: make_identical_validator_configs(&validator_config, 4), ..ClusterConfig::default() }; let cluster = LocalCluster::new(&mut config); @@ -1419,9 +1682,10 @@ fn test_wait_for_max_stake() { // votable, then B_{i+1} still chains to B_i fn test_no_voting() { solana_logger::setup_with_default(RUST_LOG_FILTER); - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; - validator_config.voting_disabled = true; + let validator_config = ValidatorConfig { + voting_disabled: true, + ..ValidatorConfig::default() + }; let mut config = ClusterConfig { cluster_lamports: 10_000, node_stakes: vec![100], @@ -1456,7 +1720,6 @@ fn test_no_voting() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_optimistic_confirmation_violation_detection() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ -1473,7 +1736,10 @@ fn test_optimistic_confirmation_violation_detection() { let mut config = ClusterConfig { cluster_lamports: 100_000, node_stakes: node_stakes.clone(), - validator_configs: vec![ValidatorConfig::default(); node_stakes.len()], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + node_stakes.len(), + ), validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -1568,7 +1834,6 @@ fn test_optimistic_confirmation_violation_detection() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_validator_saves_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); @@ -1610,14 +1875,17 @@ fn test_validator_saves_tower() { } sleep(Duration::from_millis(10)); } + // Stop validator and check saved tower let validator_info = 
cluster.exit_node(&validator_id); let tower1 = Tower::restore(&ledger_path, &validator_id).unwrap(); trace!("tower1: {:?}", tower1); assert_eq!(tower1.root(), 0); + // Restart the validator and wait for a new root cluster.restart_node(&validator_id, validator_info); let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + // Wait for the first root loop { #[allow(deprecated)] @@ -1705,7 +1973,7 @@ fn test_validator_saves_tower() { } fn open_blockstore(ledger_path: &Path) -> Blockstore { - Blockstore::open_with_access_type(ledger_path, AccessType::PrimaryOnly, None, true) + Blockstore::open_with_access_type(ledger_path, AccessType::TryPrimaryThenSecondary, None, true) .unwrap_or_else(|e| { panic!("Failed to open ledger at {:?}, err: {}", ledger_path, e); }) @@ -1812,7 +2080,10 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b let mut config = ClusterConfig { cluster_lamports: 100_000, node_stakes: node_stakes.clone(), - validator_configs: vec![ValidatorConfig::default(); node_stakes.len()], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + node_stakes.len(), + ), validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -1999,7 +2270,10 @@ fn do_test_future_tower(cluster_mode: ClusterMode) { let mut config = ClusterConfig { cluster_lamports: 100_000, node_stakes: node_stakes.clone(), - validator_configs: vec![ValidatorConfig::default(); node_stakes.len()], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + node_stakes.len(), + ), validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -2066,14 +2340,12 @@ fn do_test_future_tower(cluster_mode: ClusterMode) { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_future_tower_master_only() { do_test_future_tower(ClusterMode::MasterOnly); } #[test] #[serial] -#[cfg_attr(not(feature 
= "multiple-db-in-thread"), ignore)] fn test_future_tower_master_slave() { do_test_future_tower(ClusterMode::MasterSlave); } @@ -2105,7 +2377,10 @@ fn test_hard_fork_invalidates_tower() { let mut config = ClusterConfig { cluster_lamports: 100_000, node_stakes: node_stakes.clone(), - validator_configs: vec![ValidatorConfig::default(); node_stakes.len()], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + node_stakes.len(), + ), validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -2196,37 +2471,33 @@ fn test_hard_fork_invalidates_tower() { #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_no_optimistic_confirmation_violation_with_tower() { do_test_optimistic_confirmation_violation_with_or_without_tower(true); } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_optimistic_confirmation_violation_without_tower() { do_test_optimistic_confirmation_violation_with_or_without_tower(false); } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_run_test_load_program_accounts_root() { run_test_load_program_accounts(CommitmentConfig::finalized()); } #[test] #[serial] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn test_run_test_load_program_accounts_partition_root() { run_test_load_program_accounts_partition(CommitmentConfig::finalized()); } fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { let num_slots_per_validator = 8; - let partitions: [&[usize]; 2] = [&[(1)], &[(1)]]; + let partitions: [Vec; 2] = [vec![1], vec![1]]; let (leader_schedule, validator_keys) = - create_custom_leader_schedule(partitions.len(), num_slots_per_validator); + create_custom_leader_schedule(&[num_slots_per_validator, num_slots_per_validator]); let (update_client_sender, update_client_receiver) = unbounded(); let (scan_client_sender, scan_client_receiver) = unbounded(); 
@@ -2240,7 +2511,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { scan_client_receiver, ); - let on_partition_start = |cluster: &mut LocalCluster| { + let on_partition_start = |cluster: &mut LocalCluster, _: &mut ()| { let update_client = cluster .get_validator_client(&cluster.entry_point_info.id) .unwrap(); @@ -2251,7 +2522,9 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { scan_client_sender.send(scan_client).unwrap(); }; - let on_partition_resolved = |cluster: &mut LocalCluster| { + let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {}; + + let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { cluster.check_for_new_roots(20, &"run_test_load_program_accounts_partition"); exit.store(true, Ordering::Relaxed); t_update.join().unwrap(); @@ -2261,8 +2534,12 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { run_cluster_partition( &partitions, Some((leader_schedule, validator_keys)), + (), on_partition_start, + on_partition_before_resolved, on_partition_resolved, + None, + None, additional_accounts, ); } @@ -2273,7 +2550,11 @@ fn setup_transfer_scan_threads( scan_commitment: CommitmentConfig, update_client_receiver: Receiver, scan_client_receiver: Receiver, -) -> (JoinHandle<()>, JoinHandle<()>, Vec<(Pubkey, Account)>) { +) -> ( + JoinHandle<()>, + JoinHandle<()>, + Vec<(Pubkey, AccountSharedData)>, +) { let exit_ = exit.clone(); let starting_keypairs: Arc> = Arc::new( iter::repeat_with(Keypair::new) @@ -2285,9 +2566,14 @@ fn setup_transfer_scan_threads( .take(num_starting_accounts) .collect(), ); - let starting_accounts: Vec<(Pubkey, Account)> = starting_keypairs + let starting_accounts: Vec<(Pubkey, AccountSharedData)> = starting_keypairs .iter() - .map(|k| (k.pubkey(), Account::new(1, 0, &system_program::id()))) + .map(|k| { + ( + k.pubkey(), + AccountSharedData::new(1, 0, &system_program::id()), + ) + }) .collect(); let 
starting_keypairs_ = starting_keypairs.clone(); @@ -2403,7 +2689,10 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) { let mut config = ClusterConfig { cluster_lamports: 100_000, node_stakes: node_stakes.clone(), - validator_configs: vec![ValidatorConfig::default(); node_stakes.len()], + validator_configs: make_identical_validator_configs( + &ValidatorConfig::default(), + node_stakes.len(), + ), validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -2510,11 +2799,12 @@ fn setup_snapshot_validator_config( let (account_storage_dirs, account_storage_paths) = generate_account_paths(num_account_paths); // Create the validator config - let mut validator_config = ValidatorConfig::default(); - validator_config.rpc_config.enable_validator_exit = true; - validator_config.snapshot_config = Some(snapshot_config); - validator_config.account_paths = account_storage_paths; - validator_config.accounts_hash_interval_slots = snapshot_interval_slots; + let validator_config = ValidatorConfig { + snapshot_config: Some(snapshot_config), + account_paths: account_storage_paths, + accounts_hash_interval_slots: snapshot_interval_slots, + ..ValidatorConfig::default() + }; SnapshotValidatorConfig { _snapshot_dir: snapshot_dir, diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index dcc28a9080..6ccea8db9e 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -3,20 +3,20 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-log-analyzer" description = "The solana cluster network analysis tool" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" publish = false [dependencies] -byte-unit = "4.0.8" +byte-unit = "4.0.9" clap = "2.33.1" -serde = "1.0.118" +serde = "1.0.122" serde_json = "1.0.56" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-logger = { path = 
"../logger", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [[bin]] name = "solana-log-analyzer" diff --git a/log-analyzer/src/main.rs b/log-analyzer/src/main.rs index b9ce45dd64..a8eaf41bc5 100644 --- a/log-analyzer/src/main.rs +++ b/log-analyzer/src/main.rs @@ -11,7 +11,7 @@ use std::ops::Sub; use std::path::PathBuf; #[derive(Deserialize, Serialize, Debug)] -struct IAddrMapping { +struct IpAddrMapping { private: String, public: String, } @@ -90,7 +90,7 @@ impl Sub for &LogLine { } } -fn map_ip_address(mappings: &[IAddrMapping], target: String) -> String { +fn map_ip_address(mappings: &[IpAddrMapping], target: String) -> String { for mapping in mappings { if target.contains(&mapping.private) { return target.replace(&mapping.private, mapping.public.as_str()); @@ -100,7 +100,7 @@ fn map_ip_address(mappings: &[IAddrMapping], target: String) -> String { } fn process_iftop_logs(matches: &ArgMatches) { - let mut map_list: Vec = vec![]; + let mut map_list: Vec = vec![]; if let ("map-IP", Some(args_matches)) = matches.subcommand() { let mut list = args_matches .value_of("list") diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 7861785d3a..abf9bfd65c 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-logger" -version = "1.5.19" +version = "1.6.14" description = "Solana Logger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-logger" edition = "2018" [dependencies] -env_logger = "0.8.2" +env_logger = "0.8.3" lazy_static = "1.4.0" log = "0.4.11" diff --git a/measure/Cargo.toml b/measure/Cargo.toml index 03ab8c752a..eda7414529 100644 --- a/measure/Cargo.toml +++ b/measure/Cargo.toml @@ -1,7 +1,7 @@ [package] name = 
"solana-measure" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-measure" readme = "../README.md" @@ -12,8 +12,8 @@ edition = "2018" [dependencies] log = "0.4.11" -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } [target."cfg(unix)".dependencies] jemallocator = "0.3.2" diff --git a/measure/src/measure.rs b/measure/src/measure.rs index f8c9c92502..b44fb6ec03 100644 --- a/measure/src/measure.rs +++ b/measure/src/measure.rs @@ -20,6 +20,10 @@ impl Measure { self.duration = duration_as_ns(&self.start.elapsed()); } + pub fn as_ns(&self) -> u64 { + self.duration + } + pub fn as_us(&self) -> u64 { self.duration / 1000 } diff --git a/merkle-root-bench/Cargo.toml b/merkle-root-bench/Cargo.toml index e21a700701..586b39a530 100644 --- a/merkle-root-bench/Cargo.toml +++ b/merkle-root-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-merkle-root-bench" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] log = "0.4.11" -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" 
} clap = "2.33.1" [package.metadata.docs.rs] diff --git a/merkle-root-bench/src/main.rs b/merkle-root-bench/src/main.rs index a0a91fe7b4..76ec3388dc 100644 --- a/merkle-root-bench/src/main.rs +++ b/merkle-root-bench/src/main.rs @@ -1,7 +1,7 @@ extern crate log; use clap::{crate_description, crate_name, value_t, App, Arg}; use solana_measure::measure::Measure; -use solana_runtime::accounts_db::AccountsDb; +use solana_runtime::accounts_hash::AccountsHash; use solana_sdk::{hash::Hash, pubkey::Pubkey}; fn main() { @@ -29,24 +29,24 @@ fn main() { let num_accounts = value_t!(matches, "num_accounts", usize).unwrap_or(10_000); let iterations = value_t!(matches, "iterations", usize).unwrap_or(20); let hashes: Vec<_> = (0..num_accounts) - .map(|_| (Pubkey::new_unique(), Hash::new_unique(), 1)) + .map(|_| (Pubkey::new_unique(), Hash::new_unique())) .collect(); let elapsed: Vec<_> = (0..iterations) .map(|_| { let hashes = hashes.clone(); // done outside timing - let mut time = Measure::start("compute_merkle_root_and_capitalization"); + let mut time = Measure::start("compute_merkle_root"); let fanout = 16; - AccountsDb::compute_merkle_root_and_capitalization(hashes, fanout); + AccountsHash::compute_merkle_root(hashes, fanout); time.stop(); time.as_us() }) .collect(); for result in &elapsed { - println!("compute_merkle_root_and_capitalization(us),{}", result); + println!("compute_merkle_root(us),{}", result); } println!( - "compute_merkle_root_and_capitalization(us) avg: {}", + "compute_merkle_root(us) avg: {}", elapsed.into_iter().sum::() as f64 / iterations as f64 ); } diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index 87a729c90b..830f503c77 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-merkle-tree" -version = "1.5.19" +version = "1.6.14" description = "Solana Merkle Tree" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = 
"https://docs.rs/solana-merkle-tree" edition = "2018" [dependencies] -solana-program = { path = "../sdk/program", version = "=1.5.19" } +solana-program = { path = "../sdk/program", version = "=1.6.14" } fast-math = "0.1" # This can go once the BPF toolchain target Rust 1.42.0+ diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index cdf1c98ffb..12f9461718 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-metrics" -version = "1.5.19" +version = "1.6.14" description = "Solana Metrics" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,17 +10,16 @@ documentation = "https://docs.rs/solana-metrics" edition = "2018" [dependencies] -env_logger = "0.8.2" +env_logger = "0.8.3" gethostname = "0.2.1" lazy_static = "1.4.0" log = "0.4.11" -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } -solana-sdk = { path = "../sdk", version = "=1.5.19" } +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +solana-sdk = { path = "../sdk", version = "=1.6.14" } [dev-dependencies] rand = "0.7.0" serial_test = "0.4.0" -serial_test_derive = "0.4.0" [lib] name = "solana_metrics" diff --git a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json index 069a7ea854..0fe089b623 100644 --- a/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json +++ b/metrics/scripts/grafana-provisioning/dashboards/cluster-monitor.json @@ -15,8 +15,8 @@ "editable": true, "gnetId": null, "graphTooltip": 0, - "id": 2075, - "iteration": 1607116926093, + "id": 2194, + "iteration": 1610479516268, "links": [ { "asDropdown": true, @@ -3078,8 +3078,8 @@ "scroll": true, "showHeader": true, "sort": { - "col": null, - "desc": false + "col": 1, + "desc": true }, "styles": [ { @@ -8611,7 +8611,7 @@ "x": 0, "y": 71 }, - "id": 78, + 
"id": 50, "legend": { "alignAsTable": false, "avg": false, @@ -8727,7 +8727,7 @@ "x": 8, "y": 71 }, - "id": 50, + "id": 51, "legend": { "alignAsTable": false, "avg": false, @@ -9048,7 +9048,7 @@ "x": 16, "y": 71 }, - "id": 51, + "id": 52, "legend": { "alignAsTable": false, "avg": false, @@ -9320,7 +9320,7 @@ "x": 0, "y": 77 }, - "id": 52, + "id": 53, "panels": [], "title": "Tower Consensus", "type": "row" @@ -9343,7 +9343,7 @@ "x": 0, "y": 78 }, - "id": 53, + "id": 54, "legend": { "alignAsTable": false, "avg": false, @@ -9503,7 +9503,7 @@ "x": 8, "y": 78 }, - "id": 54, + "id": 55, "legend": { "alignAsTable": false, "avg": false, @@ -9602,6 +9602,43 @@ ] ], "tags": [] + }, + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "orderByTime": "ASC", + "policy": "default", + "query": "SELECT max(\"root\") AS \"cluster-root\" FROM \"$testnet\".\"autogen\".\"tower-observed\" WHERE $timeFilter GROUP BY time($__interval)", + "rawQuery": true, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "value" + ], + "type": "field" + }, + { + "params": [], + "type": "mean" + } + ] + ], + "tags": [] } ], "thresholds": [], @@ -9663,7 +9700,7 @@ "x": 16, "y": 78 }, - "id": 55, + "id": 56, "legend": { "alignAsTable": false, "avg": false, @@ -9859,7 +9896,7 @@ "x": 0, "y": 83 }, - "id": 56, + "id": 57, "legend": { "alignAsTable": false, "avg": false, @@ -9982,7 +10019,7 @@ "x": 8, "y": 83 }, - "id": 57, + "id": 58, "legend": { "alignAsTable": false, "avg": false, @@ -10105,7 +10142,7 @@ "x": 16, "y": 83 }, - "id": 58, + "id": 59, "legend": { "alignAsTable": false, "avg": false, @@ -10209,15 +10246,255 @@ "alignLevel": null } }, + { + "aliasColors": { + "cluster-info.repair": "#ba43a9", + "replay_stage-new_leader.last": "#00ffbb", + "tower-vote.last": "#00ffbb", + "window-service.receive": "#b7dbab", + "window-stage.consumed": "#5195ce" + }, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 88 + }, + "id": 79, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": false, + "measurement": "cluster_info-vote-count", + "orderByTime": "ASC", + "policy": "autogen", + "query": "SELECT max(\"slot_height\") FROM \"$testnet\".\"autogen\".\"bank-new_from_parent-heights\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + }, + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": false, + "measurement": "cluster_info-vote-count", + "orderByTime": "ASC", + "policy": "autogen", + "query": "SELECT mean(\"slot_height\") AS \"cluster-height\" FROM \"$testnet\".\"autogen\".\"bank-new_from_parent-heights\" WHERE $timeFilter GROUP BY time($__interval)", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + }, + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": 
false, + "measurement": "cluster_info-vote-count", + "orderByTime": "ASC", + "policy": "autogen", + "query": "SELECT max(\"slot\") AS \"cluster-replay-slot\" FROM \"$testnet\".\"autogen\".\"replay-slot-stats\" WHERE $timeFilter GROUP BY time($__interval)", + "rawQuery": true, + "refId": "C", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + }, + { + "groupBy": [ + { + "params": [ + "$__interval" + ], + "type": "time" + }, + { + "params": [ + "null" + ], + "type": "fill" + } + ], + "hide": false, + "measurement": "cluster_info-vote-count", + "orderByTime": "ASC", + "policy": "autogen", + "query": "SELECT max(\"slot\") FROM \"$testnet\".\"autogen\".\"replay-slot-stats\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", + "rawQuery": true, + "refId": "D", + "resultFormat": "time_series", + "select": [ + [ + { + "params": [ + "count" + ], + "type": "field" + }, + { + "params": [], + "type": "sum" + } + ] + ], + "tags": [] + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Slot Production ($hostid)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 88 + "y": 93 }, - "id": 59, + "id": 60, "panels": [], "repeat": null, "title": "IP Network", @@ -10234,9 +10511,9 @@ "h": 5, "w": 12, "x": 0, - "y": 89 + "y": 94 }, - "id": 60, + "id": 61, "legend": { "alignAsTable": false, "avg": false, @@ 
-10467,9 +10744,9 @@ "h": 5, "w": 12, "x": 12, - "y": 89 + "y": 94 }, - "id": 61, + "id": 62, "legend": { "alignAsTable": false, "avg": false, @@ -10620,9 +10897,9 @@ "h": 1, "w": 24, "x": 0, - "y": 94 + "y": 99 }, - "id": 62, + "id": 63, "panels": [], "title": "Signature Verification", "type": "row" @@ -10638,9 +10915,9 @@ "h": 5, "w": 12, "x": 0, - "y": 95 + "y": 100 }, - "id": 63, + "id": 64, "legend": { "avg": false, "current": false, @@ -10840,9 +11117,9 @@ "h": 5, "w": 12, "x": 12, - "y": 95 + "y": 100 }, - "id": 64, + "id": 65, "legend": { "alignAsTable": false, "avg": false, @@ -10989,9 +11266,9 @@ "h": 1, "w": 24, "x": 0, - "y": 100 + "y": 105 }, - "id": 65, + "id": 66, "panels": [], "title": "Snapshots", "type": "row" @@ -11007,9 +11284,9 @@ "h": 6, "w": 8, "x": 0, - "y": 101 + "y": 106 }, - "id": 66, + "id": 67, "legend": { "avg": false, "current": false, @@ -11199,9 +11476,9 @@ "h": 6, "w": 8, "x": 8, - "y": 101 + "y": 106 }, - "id": 67, + "id": 68, "legend": { "avg": false, "current": false, @@ -11467,9 +11744,9 @@ "h": 6, "w": 8, "x": 16, - "y": 101 + "y": 106 }, - "id": 68, + "id": 69, "legend": { "avg": false, "current": false, @@ -11661,9 +11938,9 @@ "h": 6, "w": 8, "x": 0, - "y": 107 + "y": 112 }, - "id": 69, + "id": 70, "legend": { "avg": false, "current": false, @@ -11852,9 +12129,9 @@ "h": 1, "w": 24, "x": 0, - "y": 113 + "y": 118 }, - "id": 70, + "id": 71, "panels": [], "title": "RPC Send Transaction Service", "type": "row" @@ -11870,9 +12147,9 @@ "h": 6, "w": 12, "x": 0, - "y": 114 + "y": 119 }, - "id": 71, + "id": 72, "legend": { "avg": false, "current": false, @@ -11988,9 +12265,9 @@ "h": 6, "w": 12, "x": 12, - "y": 114 + "y": 119 }, - "id": 72, + "id": 73, "legend": { "avg": false, "current": false, @@ -12251,9 +12528,9 @@ "h": 1, "w": 24, "x": 0, - "y": 120 + "y": 125 }, - "id": 73, + "id": 74, "panels": [], "title": "Bench TPS", "type": "row" @@ -12269,9 +12546,9 @@ "h": 5, "w": 7, "x": 0, - "y": 121 + "y": 126 }, - "id": 74, + "id": 75, 
"legend": { "avg": false, "current": false, @@ -12384,9 +12661,9 @@ "h": 5, "w": 7, "x": 7, - "y": 121 + "y": 126 }, - "id": 75, + "id": 76, "legend": { "alignAsTable": false, "avg": false, @@ -12609,9 +12886,9 @@ "h": 5, "w": 10, "x": 14, - "y": 121 + "y": 126 }, - "id": 76, + "id": 77, "links": [], "pageSize": null, "scroll": true, @@ -12697,9 +12974,9 @@ "h": 4, "w": 10, "x": 0, - "y": 126 + "y": 131 }, - "id": 77, + "id": 78, "legend": { "avg": false, "current": false, diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index 787cc8f386..1bbc0dd778 100644 --- a/metrics/src/counter.rs +++ b/metrics/src/counter.rs @@ -209,7 +209,7 @@ mod tests { use crate::counter::{Counter, DEFAULT_LOG_RATE, DEFAULT_METRICS_RATE}; use log::Level; use log::*; - use serial_test_derive::serial; + use serial_test::serial; use std::env; use std::sync::atomic::Ordering; use std::sync::{Once, RwLock}; @@ -226,12 +226,26 @@ mod tests { } } + /// Try to initialize the logger with a filter level of INFO. + /// + /// Incrementing a counter only happens if the logger is configured for the + /// given log level, so the tests need an INFO logger to pass. + fn try_init_logger_at_level_info() -> Result<(), log::SetLoggerError> { + // Use ::new() to configure the logger manually, instead of using the + // default of reading the RUST_LOG environment variable. Set is_test to + // print to stdout captured by the test runner, instead of polluting the + // test runner output. 
+ let module_limit = None; + env_logger::Builder::new() + .filter(module_limit, log::LevelFilter::Info) + .is_test(true) + .try_init() + } + #[test] #[serial] fn test_counter() { - env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info")) - .try_init() - .ok(); + try_init_logger_at_level_info().ok(); let _readlock = get_env_lock().read(); static mut COUNTER: Counter = create_counter!("test", 1000, 1); unsafe { @@ -261,9 +275,7 @@ mod tests { #[test] #[serial] fn test_metricsrate() { - env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info")) - .try_init() - .ok(); + try_init_logger_at_level_info().ok(); let _readlock = get_env_lock().read(); env::remove_var("SOLANA_DEFAULT_METRICS_RATE"); static mut COUNTER: Counter = create_counter!("test", 1000, 0); @@ -279,9 +291,7 @@ mod tests { #[test] #[serial] fn test_metricsrate_env() { - env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info")) - .try_init() - .ok(); + try_init_logger_at_level_info().ok(); let _writelock = get_env_lock().write(); env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50"); static mut COUNTER: Counter = create_counter!("test", 1000, 0); @@ -305,9 +315,7 @@ mod tests { #[test] #[serial] fn test_lograte() { - env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info")) - .try_init() - .ok(); + try_init_logger_at_level_info().ok(); let _readlock = get_env_lock().read(); assert_eq!( Counter::default_log_rate(), @@ -326,9 +334,7 @@ mod tests { #[test] #[serial] fn test_lograte_env() { - env_logger::Builder::from_env(env_logger::Env::new().default_filter_or("solana=info")) - .try_init() - .ok(); + try_init_logger_at_level_info().ok(); assert_ne!(DEFAULT_LOG_RATE, 0); let _writelock = get_env_lock().write(); static mut COUNTER: Counter = create_counter!("test_lograte_env", 0, 1); diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index 4484e8220b..6d55b89e7e 100644 --- 
a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -103,16 +103,23 @@ impl MetricsWriter for InfluxDbMetricsWriter { let client = reqwest::blocking::Client::builder() .timeout(Duration::from_secs(5)) - .build() - .unwrap(); + .build(); + let client = match client { + Ok(client) => client, + Err(err) => { + warn!("client instantiation failed: {}", err); + return; + } + }; + let response = client.post(write_url.as_str()).body(line).send(); if let Ok(resp) = response { - if !resp.status().is_success() { - warn!( - "submit response unsuccessful: {} {}", - resp.status(), - resp.text().unwrap() - ); + let status = resp.status(); + if !status.is_success() { + let text = resp + .text() + .unwrap_or_else(|_| "[text body empty]".to_string()); + warn!("submit response unsuccessful: {} {}", status, text,); } } else { warn!("submit error: {}", response.unwrap_err()); diff --git a/multinode-demo/bench-tps.sh b/multinode-demo/bench-tps.sh index a6649b24d5..c00abe85a3 100755 --- a/multinode-demo/bench-tps.sh +++ b/multinode-demo/bench-tps.sh @@ -14,7 +14,7 @@ usage() { echo echo " Run bench-tps " echo - echo " extra args: additional arguments are pass along to solana-bench-tps" + echo " extra args: additional arguments are passed along to solana-bench-tps" echo exit 1 } diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index e32acf56d4..350e953533 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -23,60 +23,55 @@ no_restart=0 args=() while [[ -n $1 ]]; do - if [[ ${1:0:1} = - ]]; then - if [[ $1 = --init-complete-file ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --gossip-host ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --gossip-port ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --dev-halt-at-slot ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --dynamic-port-range ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --limit-ledger-size ]]; then - 
args+=("$1" "$2") - shift 2 - elif [[ $1 = --no-rocksdb-compaction ]]; then - args+=("$1") - shift - elif [[ $1 = --enable-rpc-transaction-history ]]; then - args+=("$1") - shift - elif [[ $1 = --enable-cpi-and-log-storage ]]; then - args+=("$1") - shift - elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then - args+=("$1") - shift - elif [[ $1 = --skip-poh-verify ]]; then - args+=("$1") - shift - elif [[ $1 = --log ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --no-restart ]]; then - no_restart=1 - shift - elif [[ $1 == --wait-for-supermajority ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 == --expected-bank-hash ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 == --accounts ]]; then - args+=("$1" "$2") - shift 2 - else - echo "Unknown argument: $1" - $program --help - exit 1 - fi + if [[ ${1:0:1} = - ]]; then + if [[ $1 = --init-complete-file ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --gossip-host ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --gossip-port ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --dev-halt-at-slot ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --dynamic-port-range ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --limit-ledger-size ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --no-rocksdb-compaction ]]; then + args+=("$1") + shift + elif [[ $1 = --enable-rpc-transaction-history ]]; then + args+=("$1") + shift + elif [[ $1 = --enable-cpi-and-log-storage ]]; then + args+=("$1") + shift + elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then + args+=("$1") + shift + elif [[ $1 = --skip-poh-verify ]]; then + args+=("$1") + shift + elif [[ $1 = --log ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --no-restart ]]; then + no_restart=1 + shift + elif [[ $1 == --wait-for-supermajority ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --expected-bank-hash ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --accounts ]]; then + args+=("$1" "$2") + shift 2 else 
echo "Unknown argument: $1" $program --help @@ -97,15 +92,15 @@ ledger_dir="$SOLANA_CONFIG_DIR"/bootstrap-validator } args+=( - --enable-rpc-exit - --enable-rpc-set-log-filter - --require-tower - --ledger "$ledger_dir" - --rpc-port 8899 - --snapshot-interval-slots 200 - --identity "$identity" - --vote-account "$vote_account" - --rpc-faucet-address 127.0.0.1:9900 + --require-tower + --ledger "$ledger_dir" + --rpc-port 8899 + --snapshot-interval-slots 200 + --identity "$identity" + --vote-account "$vote_account" + --rpc-faucet-address 127.0.0.1:9900 + --no-poh-speed-test + --no-wait-for-vote-to-start-leader ) default_arg --gossip-port 8001 default_arg --log - diff --git a/multinode-demo/common.sh b/multinode-demo/common.sh index c1533067ae..31a1dfce53 100644 --- a/multinode-demo/common.sh +++ b/multinode-demo/common.sh @@ -12,7 +12,7 @@ source "$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. || exit 1; pwd)"/net/common.sh prebuild= if [[ $1 = "--prebuild" ]]; then - prebuild=true + prebuild=true fi if [[ $(uname) != Linux ]]; then @@ -34,36 +34,31 @@ if [[ -n $USE_INSTALL || ! 
-f "$SOLANA_ROOT"/Cargo.toml ]]; then fi } else - velas_program() { - declare program="$1" - declare crate="$program" - if [[ -z $program ]]; then - crate="cli" - program="velas" - else - program="velas-$program" - fi - - declare prefix=$2; - if [ "$crate" = "cli" ]; then - prefix="solana" - fi - - if [[ -r "$SOLANA_ROOT/$crate"/Cargo.toml ]]; then - if [[ "$prefix" == "" ]]; then - maybe_package="--package velas-$crate" - else - maybe_package="--package $prefix-$crate" - fi - fi - - if [[ -n $NDEBUG ]]; then - maybe_release=--release - fi - declare manifest_path="--manifest-path=$SOLANA_ROOT/$crate/Cargo.toml" - printf "cargo $CARGO_TOOLCHAIN run $manifest_path $maybe_release $maybe_package --bin %s %s -- " "$program" - } - + velas_program() { + declare program="$1" + declare crate="$program" + if [[ -z $program ]]; then + crate="cli" + program="velas" + else + program="velas-$program" + fi + + if [[ -n $NDEBUG ]]; then + maybe_release=--release + fi + + # Prebuild binaries so that CI sanity check timeout doesn't include build time + if [[ $prebuild ]]; then + ( + set -x + # shellcheck disable=SC2086 # Don't want to double quote + cargo $CARGO_TOOLCHAIN build $maybe_release --bin $program + ) + fi + + printf "cargo $CARGO_TOOLCHAIN run $maybe_release --bin %s %s -- " "$program" + } fi velas_bench_tps=$(velas_program bench-tps) diff --git a/multinode-demo/delegate-stake.sh b/multinode-demo/delegate-stake.sh index 09479d6d2d..881ea7b948 100755 --- a/multinode-demo/delegate-stake.sh +++ b/multinode-demo/delegate-stake.sh @@ -131,4 +131,49 @@ get_program_account_balance_totals SYSTEM get_program_account_balance_totals VOTE get_program_account_balance_totals CONFIG -sum_account_balances_totals +common_args+=(--url "$url") + +if [[ ${#positional_args[@]} -gt 1 ]]; then + usage "$@" +fi +if [[ -n ${positional_args[0]} ]]; then + stake_sol=${positional_args[0]} +fi + +VALIDATOR_KEYS_DIR=$SOLANA_CONFIG_DIR/validator$label 
+vote_account="${vote_account:-$VALIDATOR_KEYS_DIR/vote-account.json}" +stake_account="${stake_account:-$VALIDATOR_KEYS_DIR/stake-account.json}" + +if [[ ! -f $vote_account ]]; then + echo "Error: $vote_account not found" + exit 1 +fi + +if ((airdrops_enabled)); then + if [[ -z $keypair ]]; then + echo "--keypair argument must be provided" + exit 1 + fi + $solana_cli \ + "${common_args[@]}" --keypair "$SOLANA_CONFIG_DIR/faucet.json" \ + transfer --allow-unfunded-recipient "$keypair" "$stake_sol" +fi + +if [[ -n $keypair ]]; then + common_args+=(--keypair "$keypair") +fi + +if ! [[ -f "$stake_account" ]]; then + $solana_keygen new --no-passphrase -so "$stake_account" +else + echo "$stake_account already exists! Using it" +fi + +set -x +$solana_cli "${common_args[@]}" \ + vote-account "$vote_account" +$solana_cli "${common_args[@]}" \ + create-stake-account "$stake_account" "$stake_sol" +$solana_cli "${common_args[@]}" \ + delegate-stake $maybe_force "$stake_account" "$vote_account" +$solana_cli "${common_args[@]}" stakes "$stake_account" diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 1ba9adbf03..78ed69a614 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -7,7 +7,8 @@ here=$(dirname "$0") source "$here"/common.sh args=( - --max-genesis-archive-unpacked-size 1073741824 + --max-genesis-archive-unpacked-size 1073741824 + --no-poh-speed-test ) airdrops_enabled=1 node_sol=500 # 500 SOL: number of VLX to airdrop the node for transaction fees and vote account rent exemption (ignored if airdrops_enabled=0) @@ -46,282 +47,89 @@ EOF positional_args=() while [[ -n $1 ]]; do - if [[ ${1:0:1} = - ]]; then - # validator.sh-only options - if [[ $1 = --label ]]; then - label="-$2" - shift 2 - elif [[ $1 = --no-restart ]]; then - no_restart=1 - shift - elif [[ $1 = --node-vlx ]]; then - node_sol="$2" - shift 2 - elif [[ $1 = --no-airdrop ]]; then - airdrops_enabled=0 - shift - # velas-validator options - elif [[ $1 = 
--expected-genesis-hash ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --expected-shred-version ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --identity ]]; then - identity=$2 - args+=("$1" "$2") - shift 2 - elif [[ $1 = --authorized-voter ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --vote-account ]]; then - vote_account=$2 - args+=("$1" "$2") - shift 2 - elif [[ $1 = --init-complete-file ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --ledger ]]; then - ledger_dir=$2 - shift 2 - elif [[ $1 = --entrypoint ]]; then - gossip_entrypoint=$2 - args+=("$1" "$2") - shift 2 - elif [[ $1 = --no-snapshot-fetch ]]; then - args+=("$1") - shift - elif [[ $1 = --no-voting ]]; then - args+=("$1") - shift - elif [[ $1 = --dev-no-sigverify ]]; then - args+=("$1") - shift - elif [[ $1 = --dev-halt-at-slot ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --rpc-port ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --enable-rpc-exit ]]; then - args+=("$1") - shift - elif [[ $1 = --rpc-faucet-address ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --accounts ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --gossip-port ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --dynamic-port-range ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --snapshot-interval-slots ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --limit-ledger-size ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --no-rocksdb-compaction ]]; then - args+=("$1") - shift - elif [[ $1 = --enable-rpc-transaction-history ]]; then - args+=("$1") - shift - elif [[ $1 = --enable-cpi-and-log-storage ]]; then - args+=("$1") - shift - elif [[ $1 = --skip-poh-verify ]]; then - args+=("$1") - shift - elif [[ $1 = --log ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --trusted-validator ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = --halt-on-trusted-validators-accounts-hash-mismatch ]]; then - args+=("$1") - shift - elif [[ $1 = 
--max-genesis-archive-unpacked-size ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 == --wait-for-supermajority ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 == --expected-bank-hash ]]; then - args+=("$1" "$2") - shift 2 - elif [[ $1 = -h ]]; then - usage "$@" - else - echo "Unknown argument: $1" - exit 1 - fi - else - positional_args+=("$1") - shift - fi -done - -if [[ "$SOLANA_GPU_MISSING" -eq 1 ]]; then - echo "Testnet requires GPUs, but none were found! Aborting..." - exit 1 -fi - -if [[ ${#positional_args[@]} -gt 1 ]]; then - usage "$@" -fi - -if [[ -n $REQUIRE_LEDGER_DIR ]]; then - if [[ -z $ledger_dir ]]; then - usage "Error: --ledger not specified" - fi - SOLANA_CONFIG_DIR="$ledger_dir" -fi - -if [[ -n $REQUIRE_KEYPAIRS ]]; then - if [[ -z $identity ]]; then - usage "Error: --identity not specified" - fi - if [[ -z $vote_account ]]; then - usage "Error: --vote-account not specified" - fi -fi - -if [[ -z "$ledger_dir" ]]; then - ledger_dir="$SOLANA_CONFIG_DIR/validator$label" -fi -mkdir -p "$ledger_dir" - -if [[ -n $gossip_entrypoint ]]; then - # Prefer the --entrypoint argument if supplied... 
- if [[ ${#positional_args[@]} -gt 0 ]]; then - usage "$@" - fi -else - # ...but also support providing the entrypoint's hostname as the first - # positional argument - entrypoint_hostname=${positional_args[0]} - if [[ -z $entrypoint_hostname ]]; then - gossip_entrypoint=127.0.0.1:8001 - else - gossip_entrypoint="$entrypoint_hostname":8001 - fi -fi - -faucet_address="${gossip_entrypoint%:*}":9900 - -: "${identity:=$ledger_dir/identity.json}" -: "${vote_account:=$ledger_dir/vote-account.json}" - -default_arg --entrypoint "$gossip_entrypoint" -if ((airdrops_enabled)); then - default_arg --rpc-faucet-address "$faucet_address" -fi - -default_arg --identity "$identity" -default_arg --vote-account "$vote_account" -default_arg --ledger "$ledger_dir" -default_arg --log - -default_arg --enable-rpc-exit -default_arg --enable-rpc-set-log-filter -default_arg --require-tower - -if [[ -n $SOLANA_CUDA ]]; then - program=$velas_validator_cuda -else - program=$velas_validator -fi - -set -e -PS4="$(basename "$0"): " - -pid= -kill_node() { - # Note: do not echo anything from this function to ensure $pid is actually - # killed when stdout/stderr are redirected - set +ex - if [[ -n $pid ]]; then - declare _pid=$pid - pid= - kill "$_pid" || true - wait "$_pid" || true - fi -} - -kill_node_and_exit() { - kill_node - exit -} - -trap 'kill_node_and_exit' INT TERM ERR - -wallet() { - ( - set -x - $velas_cli --keypair "$identity" --url "$rpc_url" "$@" - ) -} - -setup_validator_accounts() { - declare node_sol=$1 - - if [[ -n "$SKIP_ACCOUNTS_CREATION" ]]; then - return 0 - fi - - if ! wallet vote-account "$vote_account"; then - if ((airdrops_enabled)); then - echo "Adding $node_sol to validator identity account:" - ( - set -x - $velas_cli --keypair "$SOLANA_CONFIG_DIR/faucet.json" --url "$rpc_url" transfer "$identity" "$node_sol" - ) || return $? - fi - - echo "Creating validator vote account" - wallet create-vote-account "$vote_account" "$identity" || return $? 
- fi - echo "Validator vote account configured" - - echo "Validator identity account balance:" - wallet balance || return $? - - return 0 -} - -rpc_url=$($velas_gossip rpc-url --timeout 180 --entrypoint "$gossip_entrypoint") - -[[ -r "$identity" ]] || $velas_keygen new --no-passphrase -so "$identity" -[[ -r "$vote_account" ]] || $velas_keygen new --no-passphrase -so "$vote_account" - -setup_validator_accounts "$node_sol" - -while true; do - echo "$PS4$program ${args[*]}" - - $program "${args[@]}" & - pid=$! - echo "pid: $pid" - - if ((no_restart)); then - wait "$pid" - exit $? - fi - - while true; do - if [[ -z $pid ]] || ! kill -0 "$pid"; then - echo "############## validator exited, restarting ##############" - break - fi - sleep 1 - done - - kill_node -done + if [[ ${1:0:1} = - ]]; then + # validator.sh-only options + if [[ $1 = --label ]]; then + label="-$2" + shift 2 + elif [[ $1 = --no-restart ]]; then + no_restart=1 + shift + elif [[ $1 = --node-sol ]]; then + node_sol="$2" + shift 2 + elif [[ $1 = --no-airdrop ]]; then + airdrops_enabled=0 + shift + # solana-validator options + elif [[ $1 = --expected-genesis-hash ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --expected-shred-version ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --identity ]]; then + identity=$2 + args+=("$1" "$2") + shift 2 + elif [[ $1 = --authorized-voter ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --vote-account ]]; then + vote_account=$2 + args+=("$1" "$2") + shift 2 + elif [[ $1 = --init-complete-file ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --ledger ]]; then + ledger_dir=$2 + shift 2 + elif [[ $1 = --entrypoint ]]; then + gossip_entrypoint=$2 + args+=("$1" "$2") + shift 2 + elif [[ $1 = --no-snapshot-fetch ]]; then + args+=("$1") + shift + elif [[ $1 = --no-voting ]]; then + args+=("$1") + shift + elif [[ $1 = --dev-no-sigverify ]]; then + args+=("$1") + shift + elif [[ $1 = --dev-halt-at-slot ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 
= --rpc-port ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --rpc-faucet-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --accounts ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --gossip-port ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --dynamic-port-range ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --snapshot-interval-slots ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --limit-ledger-size ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --no-rocksdb-compaction ]]; then + args+=("$1") + shift + elif [[ $1 = --enable-rpc-transaction-history ]]; then + args+=("$1") + shift + elif [[ $1 = --enable-cpi-and-log-storage ]]; then + args+=("$1") shift elif [[ $1 = --skip-poh-verify ]]; then args+=("$1") @@ -416,8 +224,6 @@ default_arg --identity "$identity" default_arg --vote-account "$vote_account" default_arg --ledger "$ledger_dir" default_arg --log - -default_arg --enable-rpc-exit -default_arg --enable-rpc-set-log-filter default_arg --require-tower if [[ -n $SOLANA_CUDA ]]; then @@ -468,7 +274,9 @@ setup_validator_accounts() { echo "Adding $node_sol to validator identity account:" ( set -x - $velas_cli --keypair "$SOLANA_CONFIG_DIR/faucet.json" --url "$rpc_url" transfer "$identity" "$node_sol" + $velas_cli \ + --keypair "$SOLANA_CONFIG_DIR/faucet.json" --url "$rpc_url" \ + transfer --allow-unfunded-recipient "$identity" "$node_sol" ) || return $? 
fi diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml index 6eeefb7a3d..9a30414075 100644 --- a/net-shaper/Cargo.toml +++ b/net-shaper/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-net-shaper" description = "The solana cluster network shaping tool" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,10 +11,10 @@ publish = false [dependencies] clap = "2.33.1" -serde = "1.0.118" +serde = "1.0.122" serde_json = "1.0.56" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } rand = "0.7.0" [[bin]] diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 13b12ed381..acc1311f4c 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-net-utils" -version = "1.5.19" +version = "1.6.14" description = "Solana Network Utilities" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,13 +15,13 @@ clap = "2.33.1" log = "0.4.11" nix = "0.19.0" rand = "0.7.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" socket2 = "0.3.17" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-version = { path = "../version" } -tokio = { version = "0.3.5", features = ["full"] } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +tokio = { version = "1", features = ["full"] } url = "2.1.1" [lib] diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs index 7b656fd2f7..df46be3559 100644 --- 
a/net-utils/src/ip_echo_server.rs +++ b/net-utils/src/ip_echo_server.rs @@ -4,8 +4,8 @@ use { serde_derive::{Deserialize, Serialize}, std::{io, net::SocketAddr, time::Duration}, tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, net::{TcpListener, TcpStream}, - prelude::*, runtime::{self, Runtime}, time::timeout, }, @@ -103,14 +103,14 @@ async fn process_connection(mut socket: TcpStream, peer_addr: SocketAddr) -> io: if *tcp_port != 0 { debug!("Connecting to tcp/{}", tcp_port); - let tcp_stream = timeout( + let mut tcp_stream = timeout( IO_TIMEOUT, TcpStream::connect(&SocketAddr::new(peer_addr.ip(), *tcp_port)), ) .await??; debug!("Connection established to tcp/{}", *tcp_port); - let _ = tcp_stream.shutdown(std::net::Shutdown::Both); + let _ = tcp_stream.shutdown(); } } diff --git a/net/net.sh b/net/net.sh index e2e0c8e068..3a25c81f26 100755 --- a/net/net.sh +++ b/net/net.sh @@ -524,7 +524,7 @@ prepareDeploy() { if [[ -n $releaseChannel ]]; then echo "Downloading release from channel: $releaseChannel" rm -f "$SOLANA_ROOT"/solana-release.tar.bz2 - declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2 + declare updateDownloadUrl=https://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2 ( set -x curl -L -I "$updateDownloadUrl" @@ -835,8 +835,8 @@ while [[ -n $1 ]]; do doBuild=false shift 1 elif [[ $1 = --limit-ledger-size ]]; then - maybeLimitLedgerSize="$1" - shift 1 + maybeLimitLedgerSize="$1 $2" + shift 2 elif [[ $1 = --skip-poh-verify ]]; then maybeSkipLedgerVerify="$1" shift 1 diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index c468d7202e..d302a0d6e3 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -360,6 +360,8 @@ EOF if [[ $airdropsEnabled != true ]]; then args+=(--no-airdrop) + else + args+=(--rpc-faucet-address "$entrypointIp:9900") fi if [[ -r "$SOLANA_CONFIG_DIR"/bank-hash ]]; then diff --git 
a/net/scripts/solana-user-authorized_keys.sh b/net/scripts/solana-user-authorized_keys.sh index ec527e54b0..ef3e8c2e00 100644 --- a/net/scripts/solana-user-authorized_keys.sh +++ b/net/scripts/solana-user-authorized_keys.sh @@ -83,3 +83,12 @@ SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINHM/Rdj1UtrqPWMWjgXkjr5xF SOLANA_USERS+=('behzad') SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBBalCzbx2ObVYy4OdkKi8csr4xxutk8iVhMWqLCjeSbDgyR9p6LgnDyEKLE+B0qyn3Os85ochKdnvWVopEyPjc= behzad@solana.com') + +SOLANA_USERS+=('joe') +SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP4aNf4MPb9oVGHjsneIF3iyBRMu4+J62G6hk0AptFAa joe@solana.com') + +SOLANA_USERS+=('jwash') +SOLANA_PUBKEYS+=('ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHe1QgXIAip9AR8Av5KSMxyhjP3FPIBGHKpug0/bBSsO0+gcmZiXsfmIJvRlkk5rfzCBh1VXyF2ae4OzGdazWjI= jeffreywashington@JeffMBPro.local') + +SOLANA_USERS+=('dmitri') +SOLANA_PUBKEYS+=('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSYk99LYQ82tnVUav8Mu/ZrQGXOzt4esSWTbd9IV3FF dmitri@solana.com') diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml index 40a96e670d..51f9d7a15b 100644 --- a/notifier/Cargo.toml +++ b/notifier/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-notifier" -version = "1.5.19" +version = "1.6.14" description = "Solana Notifier" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] log = "0.4.11" -reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] } +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } serde_json = "1.0" [lib] diff --git a/notifier/src/lib.rs b/notifier/src/lib.rs index ed1d418aa8..0bb3b0798c 100644 --- a/notifier/src/lib.rs +++ b/notifier/src/lib.rs @@ -20,7 +20,7 @@ use log::*; use reqwest::{blocking::Client, StatusCode}; use serde_json::json; -use std::{env, 
thread::sleep, time::Duration}; +use std::{env, str::FromStr, thread::sleep, time::Duration}; struct TelegramWebHook { bot_token: String, @@ -75,12 +75,17 @@ fn get_twilio_config() -> Result, String> { Ok(Some(config)) } +enum NotificationType { + Discord(String), + Slack(String), + Telegram(TelegramWebHook), + Twilio(TwilioWebHook), + Log(Level), +} + pub struct Notifier { client: Client, - discord_webhook: Option, - slack_webhook: Option, - telegram_webhook: Option, - twilio_webhook: Option, + notifiers: Vec, } impl Notifier { @@ -91,98 +96,115 @@ impl Notifier { pub fn new(env_prefix: &str) -> Self { info!("Initializing {}Notifier", env_prefix); - let discord_webhook = env::var(format!("{}DISCORD_WEBHOOK", env_prefix)) - .map_err(|_| { - info!("Discord notifications disabled"); - }) - .ok(); - let slack_webhook = env::var(format!("{}SLACK_WEBHOOK", env_prefix)) - .map_err(|_| { - info!("Slack notifications disabled"); - }) - .ok(); - - let telegram_webhook = if let (Ok(bot_token), Ok(chat_id)) = ( + let mut notifiers = vec![]; + + if let Ok(webhook) = env::var(format!("{}DISCORD_WEBHOOK", env_prefix)) { + notifiers.push(NotificationType::Discord(webhook)); + } + if let Ok(webhook) = env::var(format!("{}SLACK_WEBHOOK", env_prefix)) { + notifiers.push(NotificationType::Slack(webhook)); + } + + if let (Ok(bot_token), Ok(chat_id)) = ( env::var(format!("{}TELEGRAM_BOT_TOKEN", env_prefix)), env::var(format!("{}TELEGRAM_CHAT_ID", env_prefix)), ) { - Some(TelegramWebHook { bot_token, chat_id }) - } else { - info!("Telegram notifications disabled"); - None - }; - let twilio_webhook = get_twilio_config() - .map_err(|err| panic!("Twilio config error: {}", err)) - .unwrap(); + notifiers.push(NotificationType::Telegram(TelegramWebHook { + bot_token, + chat_id, + })); + } + + if let Ok(Some(webhook)) = get_twilio_config() { + notifiers.push(NotificationType::Twilio(webhook)); + } + + if let Ok(log_level) = env::var(format!("{}LOG_NOTIFIER_LEVEL", env_prefix)) { + match 
Level::from_str(&log_level) { + Ok(level) => notifiers.push(NotificationType::Log(level)), + Err(e) => warn!( + "could not parse specified log notifier level string ({}): {}", + log_level, e + ), + } + } + + info!("{} notifiers", notifiers.len()); Notifier { client: Client::new(), - discord_webhook, - slack_webhook, - telegram_webhook, - twilio_webhook, + notifiers, } } + pub fn is_empty(&self) -> bool { + self.notifiers.is_empty() + } + pub fn send(&self, msg: &str) { - if let Some(webhook) = &self.discord_webhook { - for line in msg.split('\n') { - // Discord rate limiting is aggressive, limit to 1 message a second - sleep(Duration::from_millis(1000)); - - info!("Sending {}", line); - let data = json!({ "content": line }); - - loop { - let response = self.client.post(webhook).json(&data).send(); - - if let Err(err) = response { - warn!("Failed to send Discord message: \"{}\": {:?}", line, err); - break; - } else if let Ok(response) = response { - info!("response status: {}", response.status()); - if response.status() == StatusCode::TOO_MANY_REQUESTS { - warn!("rate limited!..."); - warn!("response text: {:?}", response.text()); - sleep(Duration::from_secs(2)); - } else { - break; + for notifier in &self.notifiers { + match notifier { + NotificationType::Discord(webhook) => { + for line in msg.split('\n') { + // Discord rate limiting is aggressive, limit to 1 message a second + sleep(Duration::from_millis(1000)); + + info!("Sending {}", line); + let data = json!({ "content": line }); + + loop { + let response = self.client.post(webhook).json(&data).send(); + + if let Err(err) = response { + warn!("Failed to send Discord message: \"{}\": {:?}", line, err); + break; + } else if let Ok(response) = response { + info!("response status: {}", response.status()); + if response.status() == StatusCode::TOO_MANY_REQUESTS { + warn!("rate limited!..."); + warn!("response text: {:?}", response.text()); + sleep(Duration::from_secs(2)); + } else { + break; + } + } } } } - } - } - 
- if let Some(webhook) = &self.slack_webhook { - let data = json!({ "text": msg }); - if let Err(err) = self.client.post(webhook).json(&data).send() { - warn!("Failed to send Slack message: {:?}", err); - } - } + NotificationType::Slack(webhook) => { + let data = json!({ "text": msg }); + if let Err(err) = self.client.post(webhook).json(&data).send() { + warn!("Failed to send Slack message: {:?}", err); + } + } - if let Some(TelegramWebHook { chat_id, bot_token }) = &self.telegram_webhook { - let data = json!({ "chat_id": chat_id, "text": msg }); - let url = format!("https://api.telegram.org/bot{}/sendMessage", bot_token); + NotificationType::Telegram(TelegramWebHook { chat_id, bot_token }) => { + let data = json!({ "chat_id": chat_id, "text": msg }); + let url = format!("https://api.telegram.org/bot{}/sendMessage", bot_token); - if let Err(err) = self.client.post(&url).json(&data).send() { - warn!("Failed to send Telegram message: {:?}", err); - } - } + if let Err(err) = self.client.post(&url).json(&data).send() { + warn!("Failed to send Telegram message: {:?}", err); + } + } - if let Some(TwilioWebHook { - account, - token, - to, - from, - }) = &self.twilio_webhook - { - let url = format!( - "https://{}:{}@api.twilio.com/2010-04-01/Accounts/{}/Messages.json", - account, token, account - ); - let params = [("To", to), ("From", from), ("Body", &msg.to_string())]; - if let Err(err) = self.client.post(&url).form(¶ms).send() { - warn!("Failed to send Twilio message: {:?}", err); + NotificationType::Twilio(TwilioWebHook { + account, + token, + to, + from, + }) => { + let url = format!( + "https://{}:{}@api.twilio.com/2010-04-01/Accounts/{}/Messages.json", + account, token, account + ); + let params = [("To", to), ("From", from), ("Body", &msg.to_string())]; + if let Err(err) = self.client.post(&url).form(¶ms).send() { + warn!("Failed to send Twilio message: {:?}", err); + } + } + NotificationType::Log(level) => { + log!(*level, "{}", msg) + } } } } diff --git 
a/perf/Cargo.toml b/perf/Cargo.toml index b64f746f6a..0505f96292 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-perf" -version = "1.5.19" +version = "1.6.14" description = "Solana Performance APIs" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,17 +13,17 @@ edition = "2018" rand = "0.7.0" dlopen = "0.1.8" bincode = "1.3.1" -rayon = "1.4.0" -serde = "1.0.118" +rayon = "1.5.0" +serde = "1.0.122" dlopen_derive = "0.1.4" lazy_static = "1.4.0" log = "0.4.11" -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.5.19" } -solana-budget-program = { path = "../programs/budget", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.14" } +solana-budget-program = { path = "../programs/budget", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } curve25519-dalek = { version = "2" } [lib] diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml index 9230c9e876..6c1891befe 100644 --- a/poh-bench/Cargo.toml +++ b/poh-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-poh-bench" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -12,14 +12,14 @@ documentation = "https://docs.rs/solana-poh-bench" clap = "2.33.1" log = "0.4.11" rand = "0.7.0" -rayon = "1.4.0" -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-ledger = 
{ path = "../ledger", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-perf = { path = "../perf", version = "=1.5.19" } +rayon = "1.5.0" +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-perf = { path = "../perf", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 7063376f49..af583cc476 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -5,29 +5,28 @@ edition = "2018" license = "Apache-2.0" name = "solana-program-test" repository = "https://github.com/solana-labs/solana" -version = "1.5.19" +version = "1.6.14" [dependencies] async-trait = "0.1.42" base64 = "0.12.3" +bincode = "1.3.1" chrono = "0.4.19" chrono-humanize = "0.1.1" log = "0.4.11" mio = "0.7.6" -solana-banks-client = { path = "../banks-client", version = "=1.5.19" } -solana-banks-server = { path = "../banks-server", version = "=1.5.19" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-program = { path = "../sdk/program", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +serde = "1.0.112" +serde_derive = "1.0.103" +solana-banks-client = { path = "../banks-client", version = "=1.6.14" } 
+solana-banks-server = { path = "../banks-server", version = "=1.6.14" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } thiserror = "1.0" -tokio = { version = "0.3.5", features = ["full"] } +tokio = { version = "1", features = ["full"] } [dev-dependencies] -solana-stake-program = { path = "../programs/stake", version = "1.5.14" } - -[features] -default = [] -multiple-db-in-thread = [] +assert_matches = "1.3.0" +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index b4bff8fb3d..1f98560cec 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -7,27 +7,39 @@ use { log::*, solana_banks_client::start_client, solana_banks_server::banks_server::start_local_server, - solana_program::{ - account_info::AccountInfo, entrypoint::ProgramResult, fee_calculator::FeeCalculator, - hash::Hash, instruction::Instruction, instruction::InstructionError, message::Message, - native_token::sol_to_lamports, program_error::ProgramError, program_stubs, pubkey::Pubkey, - rent::Rent, - }, solana_runtime::{ bank::{Bank, Builtin, ExecuteTimings}, bank_forks::BankForks, commitment::BlockCommitmentCache, - genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, + genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, }, solana_sdk::{ - account::Account, - clock::Slot, - genesis_config::GenesisConfig, + account::{Account, AccountSharedData, ReadableAccount}, + account_info::AccountInfo, + clock::{Clock, Slot}, + entrypoint::{ProgramResult, SUCCESS}, + epoch_schedule::EpochSchedule, + feature_set::demote_sysvar_write_locks, + fee_calculator::{FeeCalculator, FeeRateGovernor}, + 
genesis_config::{ClusterType, GenesisConfig}, + hash::Hash, + instruction::Instruction, + instruction::InstructionError, keyed_account::KeyedAccount, + message::Message, + native_token::sol_to_lamports, process_instruction::{ stable_log, BpfComputeBudget, InvokeContext, ProcessInstructionWithContext, }, + program_error::{ProgramError, ACCOUNT_BORROW_FAILED, UNSUPPORTED_SYSVAR}, + pubkey::Pubkey, + rent::Rent, signature::{Keypair, Signer}, + sysvar::{ + clock, epoch_schedule, + fees::{self, Fees}, + rent, Sysvar, + }, }, solana_vote_program::vote_state::{VoteState, VoteStateVersions}, std::{ @@ -60,27 +72,6 @@ pub mod programs; #[macro_use] extern crate solana_bpf_loader_program; -pub fn to_instruction_error(error: ProgramError) -> InstructionError { - match error { - ProgramError::Custom(err) => InstructionError::Custom(err), - ProgramError::InvalidArgument => InstructionError::InvalidArgument, - ProgramError::InvalidInstructionData => InstructionError::InvalidInstructionData, - ProgramError::InvalidAccountData => InstructionError::InvalidAccountData, - ProgramError::AccountDataTooSmall => InstructionError::AccountDataTooSmall, - ProgramError::InsufficientFunds => InstructionError::InsufficientFunds, - ProgramError::IncorrectProgramId => InstructionError::IncorrectProgramId, - ProgramError::MissingRequiredSignature => InstructionError::MissingRequiredSignature, - ProgramError::AccountAlreadyInitialized => InstructionError::AccountAlreadyInitialized, - ProgramError::UninitializedAccount => InstructionError::UninitializedAccount, - ProgramError::NotEnoughAccountKeys => InstructionError::NotEnoughAccountKeys, - ProgramError::AccountBorrowFailed => InstructionError::AccountBorrowFailed, - ProgramError::MaxSeedLengthExceeded => InstructionError::MaxSeedLengthExceeded, - ProgramError::InvalidSeeds => InstructionError::InvalidSeeds, - ProgramError::BorshIoError(err) => InstructionError::BorshIoError(err), - ProgramError::AccountNotRentExempt => 
InstructionError::AccountNotRentExempt, - } -} - /// Errors from the program test environment #[derive(Error, Debug, PartialEq)] pub enum ProgramTestError { @@ -106,7 +97,7 @@ fn get_invoke_context<'a>() -> &'a mut dyn InvokeContext { } pub fn builtin_process_instruction( - process_instruction: solana_program::entrypoint::ProcessInstruction, + process_instruction: solana_sdk::entrypoint::ProcessInstruction, program_id: &Pubkey, keyed_accounts: &[KeyedAccount], input: &[u8], @@ -115,7 +106,7 @@ pub fn builtin_process_instruction( set_invoke_context(invoke_context); // Copy all the accounts into a HashMap to ensure there are no duplicates - let mut accounts: HashMap = keyed_accounts + let mut accounts: HashMap = keyed_accounts .iter() .map(|ka| (*ka.unsigned_key(), ka.account.borrow().clone())) .collect(); @@ -155,21 +146,18 @@ pub fn builtin_process_instruction( .collect(); // Execute the program - let result = - process_instruction(program_id, &account_infos, input).map_err(to_instruction_error); - - if result.is_ok() { - // Commit AccountInfo changes back into KeyedAccounts - for keyed_account in keyed_accounts { - let mut account = keyed_account.account.borrow_mut(); - let key = keyed_account.unsigned_key(); - let (lamports, data, _owner) = &account_refs[key]; - account.lamports = **lamports.borrow(); - account.data = data.borrow().to_vec(); - } + process_instruction(program_id, &account_infos, input).map_err(u64::from)?; + + // Commit AccountInfo changes back into KeyedAccounts + for keyed_account in keyed_accounts { + let mut account = keyed_account.account.borrow_mut(); + let key = keyed_account.unsigned_key(); + let (lamports, data, _owner) = &account_refs[key]; + account.lamports = **lamports.borrow(); + account.set_data(data.borrow().to_vec()); } - result + Ok(()) } /// Converts a `solana-program`-style entrypoint into the runtime's entrypoint style, for @@ -194,8 +182,45 @@ macro_rules! 
processor { }; } +fn get_sysvar( + id: &Pubkey, + var_addr: *mut u8, +) -> u64 { + let invoke_context = get_invoke_context(); + + let sysvar_data = match invoke_context.get_sysvar_data(id).ok_or_else(|| { + ic_msg!(invoke_context, "Unable to get Sysvar {}", id); + UNSUPPORTED_SYSVAR + }) { + Ok(sysvar_data) => sysvar_data, + Err(err) => return err, + }; + + let var: T = match bincode::deserialize(&sysvar_data) { + Ok(sysvar_data) => sysvar_data, + Err(_) => return UNSUPPORTED_SYSVAR, + }; + + unsafe { + *(var_addr as *mut _ as *mut T) = var; + } + + if invoke_context + .get_compute_meter() + .try_borrow_mut() + .map_err(|_| ACCOUNT_BORROW_FAILED) + .unwrap() + .consume(invoke_context.get_bpf_compute_budget().sysvar_base_cost + T::size_of() as u64) + .is_err() + { + panic!("Exceeded compute budget"); + } + + SUCCESS +} + struct SyscallStubs {} -impl program_stubs::SyscallStubs for SyscallStubs { +impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { fn sol_log(&self, message: &str) { let invoke_context = get_invoke_context(); let logger = invoke_context.get_logger(); @@ -231,26 +256,31 @@ impl program_stubs::SyscallStubs for SyscallStubs { } panic!("Program id {} wasn't found in account_infos", program_id); }; + let demote_sysvar_write_locks = + invoke_context.is_feature_active(&demote_sysvar_write_locks::id()); // TODO don't have the caller's keyed_accounts so can't validate writer or signer escalation or deescalation yet let caller_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i)) + .map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks)) .collect::>(); stable_log::program_invoke(&logger, &program_id, invoke_context.invoke_depth()); - fn ai_to_a(ai: &AccountInfo) -> Account { - Account { + fn ai_to_a(ai: &AccountInfo) -> AccountSharedData { + AccountSharedData::from(Account { lamports: ai.lamports(), data: ai.try_borrow_data().unwrap().to_vec(), owner: *ai.owner, executable: ai.executable, rent_epoch: 
ai.rent_epoch, - } + }) } - let executables = vec![(program_id, RefCell::new(ai_to_a(program_account_info())))]; + let executables = vec![( + program_id, + Rc::new(RefCell::new(ai_to_a(program_account_info()))), + )]; // Convert AccountInfos into Accounts let mut accounts = vec![]; @@ -304,7 +334,7 @@ impl program_stubs::SyscallStubs for SyscallStubs { // Copy writeable account modifications back into the caller's AccountInfos for (i, account_pubkey) in message.account_keys.iter().enumerate() { - if !message.is_writable(i) { + if !message.is_writable(i, true) { continue; } @@ -314,7 +344,8 @@ impl program_stubs::SyscallStubs for SyscallStubs { **account_info.try_borrow_mut_lamports().unwrap() = account.borrow().lamports; let mut data = account_info.try_borrow_mut_data()?; - let new_data = &account.borrow().data; + let account_borrow = account.borrow(); + let new_data = account_borrow.data(); if *account_info.owner != account.borrow().owner { // TODO Figure out a better way to allow the System Program to set the account owner #[allow(clippy::transmute_ptr_to_ptr)] @@ -340,9 +371,35 @@ impl program_stubs::SyscallStubs for SyscallStubs { stable_log::program_success(&logger, &program_id); Ok(()) } + + fn sol_get_clock_sysvar(&self, var_addr: *mut u8) -> u64 { + get_sysvar::(&clock::id(), var_addr) + } + + fn sol_get_epoch_schedule_sysvar(&self, var_addr: *mut u8) -> u64 { + get_sysvar::(&epoch_schedule::id(), var_addr) + } + + fn sol_get_fees_sysvar(&self, var_addr: *mut u8) -> u64 { + get_sysvar::(&fees::id(), var_addr) + } + + fn sol_get_rent_sysvar(&self, var_addr: *mut u8) -> u64 { + get_sysvar::(&rent::id(), var_addr) + } } pub fn find_file(filename: &str) -> Option { + for dir in default_shared_object_dirs() { + let candidate = dir.join(&filename); + if candidate.exists() { + return Some(candidate); + } + } + None +} + +fn default_shared_object_dirs() -> Vec { let mut search_path = vec![]; if let Ok(bpf_out_dir) = std::env::var("BPF_OUT_DIR") { 
search_path.push(PathBuf::from(bpf_out_dir)); @@ -351,15 +408,8 @@ pub fn find_file(filename: &str) -> Option { if let Ok(dir) = std::env::current_dir() { search_path.push(dir); } - trace!("search path: {:?}", search_path); - - for path in search_path { - let candidate = path.join(&filename); - if candidate.exists() { - return Some(candidate); - } - } - None + trace!("BPF .so search path: {:?}", search_path); + search_path } pub fn read_file>(path: P) -> Vec { @@ -413,7 +463,7 @@ fn setup_fee_calculator(bank: Bank) -> Bank { } pub struct ProgramTest { - accounts: Vec<(Pubkey, Account)>, + accounts: Vec<(Pubkey, AccountSharedData)>, builtins: Vec, bpf_compute_max_units: Option, prefer_bpf: bool, @@ -451,6 +501,13 @@ impl Default for ProgramTest { } impl ProgramTest { + /// Create a `ProgramTest`. + /// + /// This is a wrapper around [`default`] and [`add_program`]. See their documentation for more + /// details. + /// + /// [`default`]: #method.default + /// [`add_program`]: #method.add_program pub fn new( program_name: &str, program_id: Pubkey, @@ -473,7 +530,8 @@ impl ProgramTest { /// Add an account to the test environment pub fn add_account(&mut self, address: Pubkey, account: Account) { - self.accounts.push((address, account)); + self.accounts + .push((address, AccountSharedData::from(account))); } /// Add an account to the test environment with the account data in the provided `filename` @@ -522,7 +580,7 @@ impl ProgramTest { /// Add a BPF program to the test environment. /// - /// `program_name` will also used to locate the BPF shared object in the current or fixtures + /// `program_name` will also be used to locate the BPF shared object in the current or fixtures /// directory. 
/// /// If `process_instruction` is provided, the natively built-program may be used instead of the @@ -533,20 +591,7 @@ impl ProgramTest { program_id: Pubkey, process_instruction: Option, ) { - let loader = solana_program::bpf_loader::id(); - let program_file = find_file(&format!("{}.so", program_name)); - - if process_instruction.is_none() && program_file.is_none() { - panic!("Unable to add program {} ({})", program_name, program_id); - } - - if (program_file.is_some() && self.prefer_bpf) || process_instruction.is_none() { - let program_file = program_file.unwrap_or_else(|| { - panic!( - "Program file data not available for {} ({})", - program_name, program_id - ); - }); + let add_bpf = |this: &mut ProgramTest, program_file: PathBuf| { let data = read_file(&program_file); info!( "\"{}\" BPF program from {}{}", @@ -570,28 +615,87 @@ impl ProgramTest { .unwrap_or_else(|| "".to_string()) ); - self.add_account( + this.add_account( program_id, Account { lamports: Rent::default().minimum_balance(data.len()).min(1), data, - owner: loader, + owner: solana_sdk::bpf_loader::id(), executable: true, rent_epoch: 0, }, ); - } else { + }; + + let add_native = |this: &mut ProgramTest, process_fn: ProcessInstructionWithContext| { info!("\"{}\" program loaded as native code", program_name); - self.builtins.push(Builtin::new( + this.builtins + .push(Builtin::new(program_name, program_id, process_fn)); + }; + + let warn_invalid_program_name = || { + let valid_program_names = default_shared_object_dirs() + .iter() + .filter_map(|dir| dir.read_dir().ok()) + .flat_map(|read_dir| { + read_dir.filter_map(|entry| { + let path = entry.ok()?.path(); + if !path.is_file() { + return None; + } + match path.extension()?.to_str()? { + "so" => Some(path.file_stem()?.to_os_string()), + _ => None, + } + }) + }) + .collect::>(); + + if valid_program_names.is_empty() { + // This should be unreachable as `test-bpf` should guarantee at least one shared + // object exists somewhere. 
+ warn!("No BPF shared objects found."); + return; + } + + warn!( + "Possible bogus program name. Ensure the program name ({}) \ + matches one of the following recognizable program names:", program_name, - program_id, - process_instruction.unwrap_or_else(|| { - panic!( - "Program processor not available for {} ({})", - program_name, program_id - ); - }), - )); + ); + for name in valid_program_names { + warn!(" - {}", name.to_str().unwrap()); + } + }; + + let program_file = find_file(&format!("{}.so", program_name)); + match (self.prefer_bpf, program_file, process_instruction) { + // If BPF is preferred (i.e., `test-bpf` is invoked) and a BPF shared object exists, + // use that as the program data. + (true, Some(file), _) => add_bpf(self, file), + + // If BPF is not required (i.e., we were invoked with `test`), use the provided + // processor function as is. + // + // TODO: figure out why tests hang if a processor panics when running native code. + (false, _, Some(process)) => add_native(self, process), + + // Invalid: `test-bpf` invocation with no matching BPF shared object. + (true, None, _) => { + warn_invalid_program_name(); + panic!( + "Program file data not available for {} ({})", + program_name, program_id + ); + } + + // Invalid: regular `test` invocation without a processor. 
+ (false, _, None) => { + panic!( + "Program processor not available for {} ({})", + program_name, program_id + ); + } } } @@ -608,24 +712,33 @@ impl ProgramTest { static ONCE: Once = Once::new(); ONCE.call_once(|| { - program_stubs::set_syscall_stubs(Box::new(SyscallStubs {})); + solana_sdk::program_stubs::set_syscall_stubs(Box::new(SyscallStubs {})); }); } let rent = Rent::default(); + let fee_rate_governor = FeeRateGovernor::default(); let bootstrap_validator_pubkey = Pubkey::new_unique(); - let bootstrap_validator_lamports = rent.minimum_balance(VoteState::size_of()); + let bootstrap_validator_stake_lamports = + rent.minimum_balance(VoteState::size_of()) + sol_to_lamports(1_000_000.0); + + let mint_keypair = Keypair::new(); + let voting_keypair = Keypair::new(); - let mut gci = create_genesis_config_with_leader( + let genesis_config = create_genesis_config_with_leader_ex( sol_to_lamports(1_000_000.0), + &mint_keypair.pubkey(), &bootstrap_validator_pubkey, - bootstrap_validator_lamports, + &voting_keypair.pubkey(), + &Pubkey::new_unique(), + bootstrap_validator_stake_lamports, + 42, + fee_rate_governor, + rent, + ClusterType::Development, + vec![], ); - let genesis_config = &mut gci.genesis_config; - genesis_config.rent = rent; - genesis_config.fee_rate_governor = - solana_program::fee_calculator::FeeRateGovernor::default(); - debug!("Payer address: {}", gci.mint_keypair.pubkey()); + debug!("Payer address: {}", mint_keypair.pubkey()); debug!("Genesis config: {}", genesis_config); let mut bank = Bank::new(&genesis_config); @@ -633,6 +746,7 @@ impl ProgramTest { for loader in &[ solana_bpf_loader_deprecated_program!(), solana_bpf_loader_program!(), + solana_bpf_loader_upgradeable_program!(), ] { bank.add_builtin(&loader.0, loader.1, loader.2); } @@ -672,7 +786,16 @@ impl ProgramTest { BlockCommitmentCache::new_for_tests_with_slots(slot, slot), )); - (bank_forks, block_commitment_cache, last_blockhash, gci) + ( + bank_forks, + block_commitment_cache, + 
last_blockhash, + GenesisConfigInfo { + genesis_config, + mint_keypair, + voting_keypair, + }, + ) } pub async fn start(self) -> (BanksClient, Keypair, Hash) { diff --git a/program-test/src/programs.rs b/program-test/src/programs.rs index f27cfa182d..9854834476 100644 --- a/program-test/src/programs.rs +++ b/program-test/src/programs.rs @@ -1,4 +1,8 @@ -use solana_sdk::{account::Account, pubkey::Pubkey, rent::Rent}; +use solana_sdk::{ + account::{Account, AccountSharedData}, + pubkey::Pubkey, + rent::Rent, +}; mod spl_token { solana_sdk::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA"); @@ -29,19 +33,19 @@ static SPL_PROGRAMS: &[(Pubkey, &[u8])] = &[ ), ]; -pub fn spl_programs(rent: &Rent) -> Vec<(Pubkey, Account)> { +pub fn spl_programs(rent: &Rent) -> Vec<(Pubkey, AccountSharedData)> { SPL_PROGRAMS .iter() .map(|(program_id, elf)| { ( *program_id, - Account { + AccountSharedData::from(Account { lamports: rent.minimum_balance(elf.len()).min(1), data: elf.to_vec(), - owner: solana_program::bpf_loader::id(), + owner: solana_sdk::bpf_loader::id(), executable: true, rent_epoch: 0, - }, + }), ) }) .collect() diff --git a/program-test/tests/builtins.rs b/program-test/tests/builtins.rs new file mode 100644 index 0000000000..a350506430 --- /dev/null +++ b/program-test/tests/builtins.rs @@ -0,0 +1,44 @@ +use solana_sdk::{ + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + signature::{Keypair, Signer}, + transaction::Transaction, +}; + +use solana_program_test::ProgramTest; + +#[tokio::test] +async fn test_bpf_loader_upgradable_present() { + // Arrange + let (mut banks_client, payer, recent_blockhash) = ProgramTest::default().start().await; + + let buffer_keypair = Keypair::new(); + let upgrade_authority_keypair = Keypair::new(); + + let rent = banks_client.get_rent().await.unwrap(); + let buffer_rent = rent.minimum_balance(UpgradeableLoaderState::programdata_len(1).unwrap()); + + let create_buffer_instructions = bpf_loader_upgradeable::create_buffer( + 
&payer.pubkey(), + &buffer_keypair.pubkey(), + &upgrade_authority_keypair.pubkey(), + buffer_rent, + 1, + ) + .unwrap(); + + let mut transaction = + Transaction::new_with_payer(&create_buffer_instructions[..], Some(&payer.pubkey())); + transaction.sign(&[&payer, &buffer_keypair], recent_blockhash); + + // Act + banks_client.process_transaction(transaction).await.unwrap(); + + // Assert + let buffer_account = banks_client + .get_account(buffer_keypair.pubkey()) + .await + .unwrap() + .unwrap(); + + assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); +} diff --git a/program-test/tests/cpi.rs b/program-test/tests/cpi.rs index 029919e1df..373d3adcd3 100644 --- a/program-test/tests/cpi.rs +++ b/program-test/tests/cpi.rs @@ -1,14 +1,15 @@ use { - solana_program::{ + solana_program_test::{processor, ProgramTest}, + solana_sdk::{ account_info::{next_account_info, AccountInfo}, entrypoint::ProgramResult, instruction::{AccountMeta, Instruction}, msg, program::invoke, pubkey::Pubkey, + signature::Signer, + transaction::Transaction, }, - solana_program_test::{processor, ProgramTest}, - solana_sdk::{signature::Signer, transaction::Transaction}, }; // Process instruction to invoke into another program @@ -22,7 +23,7 @@ fn invoker_process_instruction( let account_info_iter = &mut accounts.iter(); let invoked_program_info = next_account_info(account_info_iter)?; invoke( - &Instruction::new(*invoked_program_info.key, &[0], vec![]), + &Instruction::new_with_bincode(*invoked_program_info.key, &[0], vec![]), &[invoked_program_info.clone()], )?; msg!("Processing invoker instruction after CPI"); @@ -42,24 +43,22 @@ fn invoked_process_instruction( } #[tokio::test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] async fn cpi() { let invoker_program_id = Pubkey::new_unique(); - // Initialize and start the test network let mut program_test = ProgramTest::new( - "program-test-fuzz-invoker", + "invoker", invoker_program_id, processor!(invoker_process_instruction), ); 
let invoked_program_id = Pubkey::new_unique(); program_test.add_program( - "program-test-fuzz-invoked", + "invoked", invoked_program_id, processor!(invoked_process_instruction), ); - let mut test_state = program_test.start_with_context().await; - let instructions = vec![Instruction::new( + let mut context = program_test.start_with_context().await; + let instructions = vec![Instruction::new_with_bincode( invoker_program_id, &[0], vec![AccountMeta::new_readonly(invoked_program_id, false)], @@ -67,12 +66,12 @@ async fn cpi() { let transaction = Transaction::new_signed_with_payer( &instructions, - Some(&test_state.payer.pubkey()), - &[&test_state.payer], - test_state.last_blockhash, + Some(&context.payer.pubkey()), + &[&context.payer], + context.last_blockhash, ); - test_state + context .banks_client .process_transaction(transaction) .await diff --git a/program-test/tests/fuzz.rs b/program-test/tests/fuzz.rs index f4d494e4bc..20b0f018b5 100644 --- a/program-test/tests/fuzz.rs +++ b/program-test/tests/fuzz.rs @@ -1,11 +1,11 @@ use { solana_banks_client::BanksClient, - solana_program::{ + solana_program_test::{processor, ProgramTest}, + solana_sdk::{ account_info::AccountInfo, entrypoint::ProgramResult, hash::Hash, instruction::Instruction, - msg, pubkey::Pubkey, rent::Rent, system_instruction, + msg, pubkey::Pubkey, rent::Rent, signature::Keypair, signature::Signer, system_instruction, + transaction::Transaction, }, - solana_program_test::{processor, ProgramTest}, - solana_sdk::{signature::Keypair, signature::Signer, transaction::Transaction}, }; #[allow(clippy::unnecessary_wraps)] @@ -19,7 +19,6 @@ fn process_instruction( } #[test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn simulate_fuzz() { let rt = tokio::runtime::Runtime::new().unwrap(); let program_id = Pubkey::new_unique(); @@ -48,7 +47,6 @@ fn simulate_fuzz() { } #[test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] fn simulate_fuzz_with_context() { let rt = 
tokio::runtime::Runtime::new().unwrap(); let program_id = Pubkey::new_unique(); @@ -59,16 +57,16 @@ fn simulate_fuzz_with_context() { processor!(process_instruction), ); - let mut test_state = rt.block_on(async { program_test.start_with_context().await }); + let mut context = rt.block_on(async { program_test.start_with_context().await }); // the honggfuzz `fuzz!` macro does not allow for async closures, // so we have to use the runtime directly to run async functions rt.block_on(async { run_fuzz_instructions( &[1, 2, 3, 4, 5], - &mut test_state.banks_client, - &test_state.payer, - test_state.last_blockhash, + &mut context.banks_client, + &context.payer, + context.last_blockhash, &program_id, ) .await @@ -94,7 +92,7 @@ async fn run_fuzz_instructions( program_id, ); instructions.push(instruction); - instructions.push(Instruction::new(*program_id, &[0], vec![])); + instructions.push(Instruction::new_with_bincode(*program_id, &[0], vec![])); signer_keypairs.push(keypair); } // Process transaction on test network diff --git a/program-test/tests/sysvar.rs b/program-test/tests/sysvar.rs new file mode 100644 index 0000000000..9710523e81 --- /dev/null +++ b/program-test/tests/sysvar.rs @@ -0,0 +1,72 @@ +use { + solana_program_test::{processor, ProgramTest}, + solana_sdk::{ + account_info::AccountInfo, + clock::Clock, + entrypoint::ProgramResult, + epoch_schedule::EpochSchedule, + fee_calculator::FeeCalculator, + instruction::Instruction, + msg, + pubkey::Pubkey, + rent::Rent, + signature::Signer, + sysvar::{fees::Fees, Sysvar}, + transaction::Transaction, + }, +}; + +// Process instruction to invoke into another program +fn sysvar_getter_process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _input: &[u8], +) -> ProgramResult { + msg!("sysvar_getter"); + + let clock = Clock::get()?; + assert_eq!(42, clock.slot); + + let epoch_schedule = EpochSchedule::get()?; + assert_eq!(epoch_schedule, EpochSchedule::default()); + + let fees = Fees::get()?; + 
assert_eq!( + fees.fee_calculator, + FeeCalculator { + lamports_per_signature: 5000 + } + ); + + let rent = Rent::get()?; + assert_eq!(rent, Rent::default()); + + Ok(()) +} + +#[tokio::test] +async fn get_sysvar() { + let program_id = Pubkey::new_unique(); + let program_test = ProgramTest::new( + "sysvar_getter", + program_id, + processor!(sysvar_getter_process_instruction), + ); + + let mut context = program_test.start_with_context().await; + context.warp_to_slot(42).unwrap(); + let instructions = vec![Instruction::new_with_bincode(program_id, &(), vec![])]; + + let transaction = Transaction::new_signed_with_payer( + &instructions, + Some(&context.payer.pubkey()), + &[&context.payer], + context.last_blockhash, + ); + + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); +} diff --git a/program-test/tests/warp.rs b/program-test/tests/warp.rs index 3aa8d583fe..a4e55df785 100644 --- a/program-test/tests/warp.rs +++ b/program-test/tests/warp.rs @@ -1,6 +1,9 @@ #![allow(clippy::integer_arithmetic)] use { - solana_program::{ + assert_matches::assert_matches, + bincode::deserialize, + solana_program_test::{processor, ProgramTest, ProgramTestError}, + solana_sdk::{ account_info::{next_account_info, AccountInfo}, clock::Clock, entrypoint::ProgramResult, @@ -8,17 +11,18 @@ use { program_error::ProgramError, pubkey::Pubkey, rent::Rent, - system_instruction, system_program, - sysvar::{clock, Sysvar}, - }, - solana_program_test::{processor, ProgramTest, ProgramTestError}, - solana_sdk::{ signature::{Keypair, Signer}, + system_instruction, system_program, + sysvar::{ + clock, + stake_history::{self, StakeHistory}, + Sysvar, + }, transaction::{Transaction, TransactionError}, }, solana_stake_program::{ stake_instruction, - stake_state::{Authorized, Lockup}, + stake_state::{Authorized, Lockup, StakeState}, }, solana_vote_program::{ vote_instruction, @@ -47,7 +51,6 @@ fn process_instruction( } #[tokio::test] -#[cfg_attr(not(feature = 
"multiple-db-in-thread"), ignore)] async fn clock_sysvar_updated_from_warp() { let program_id = Pubkey::new_unique(); // Initialize and start the test network @@ -59,7 +62,7 @@ async fn clock_sysvar_updated_from_warp() { let mut context = program_test.start_with_context().await; let expected_slot = 5_000_000; - let instruction = Instruction::new( + let instruction = Instruction::new_with_bincode( program_id, &expected_slot, vec![AccountMeta::new_readonly(clock::id(), false)], @@ -84,7 +87,7 @@ async fn clock_sysvar_updated_from_warp() { // Warp to success! context.warp_to_slot(expected_slot).unwrap(); - let instruction = Instruction::new( + let instruction = Instruction::new_with_bincode( program_id, &expected_slot, vec![AccountMeta::new_readonly(clock::id(), false)], @@ -109,7 +112,6 @@ async fn clock_sysvar_updated_from_warp() { } #[tokio::test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] async fn rent_collected_from_warp() { let program_id = Pubkey::new_unique(); // Initialize and start the test network @@ -161,12 +163,13 @@ async fn rent_collected_from_warp() { } #[tokio::test] -#[cfg_attr(not(feature = "multiple-db-in-thread"), ignore)] async fn stake_rewards_from_warp() { // Initialize and start the test network let program_test = ProgramTest::default(); let mut context = program_test.start_with_context().await; + // warp once to make sure stake config doesn't get rent-collected + context.warp_to_slot(100).unwrap(); let mut instructions = vec![]; let validator_keypair = Keypair::new(); instructions.push(system_instruction::create_account( @@ -251,4 +254,30 @@ async fn stake_rewards_from_warp() { .expect("account exists") .unwrap(); assert!(account.lamports > stake_lamports); + + // check that stake is fully active + let stake_history_account = context + .banks_client + .get_account(stake_history::id()) + .await + .expect("account exists") + .unwrap(); + + let clock_account = context + .banks_client + .get_account(clock::id()) + .await + 
.expect("account exists") + .unwrap(); + + let stake_state: StakeState = deserialize(&account.data).unwrap(); + let stake_history: StakeHistory = deserialize(&stake_history_account.data).unwrap(); + let clock: Clock = deserialize(&clock_account.data).unwrap(); + let stake = stake_state.stake().unwrap(); + assert_matches!( + stake + .delegation + .stake_activating_and_deactivating(clock.epoch, Some(&stake_history), true,), + (_, 0, 0) + ); } diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 917c7c56a5..adb455157b 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -5,20 +5,25 @@ name = "Inflector" version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = ["lazy_static", "regex"] +dependencies = [ + "lazy_static", + "regex", +] [[package]] name = "addr2line" -version = "0.14.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = ["gimli"] +checksum = "a49806b9dadc843c61e7c97e72490ad7f7220ae249012fbda9ad0609457c0543" +dependencies = [ + "gimli", +] [[package]] -name = "adler" -version = "1.0.2" +name = "adler32" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" [[package]] name = "ahash" @@ -28,23 +33,27 @@ checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" -dependencies = ["memchr"] +checksum = 
"8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +dependencies = [ + "memchr", +] [[package]] name = "ansi_term" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = ["winapi 0.3.9"] +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "anyhow" -version = "1.0.40" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" [[package]] name = "arrayref" @@ -54,9 +63,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "ascii" @@ -66,69 +75,39 @@ checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" [[package]] name = "assert_matches" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = ["hermit-abi", "libc", "winapi 0.3.9"] - -[[package]] -name = "auto_enums" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0dfe45d75158751e195799f47ea02e81f570aa24bc5ef999cdd9e888c4b5c3" -dependencies = ["auto_enums_core", "auto_enums_derive"] - 
-[[package]] -name = "auto_enums_core" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da47c46001293a2c4b744d731958be22cff408a2ab76e2279328f9713b1267b4" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] - -[[package]] -name = "auto_enums_derive" -version = "0.7.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41aed1da83ecdc799503b7cb94da1b45a34d72b49caf40a61d9cf5b88ec07cfd" dependencies = [ - "autocfg 1.0.1", - "derive_utils", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "hermit-abi", + "libc", + "winapi 0.3.8", ] [[package]] name = "autocfg" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" - -[[package]] -name = "autocfg" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.57" +version = "0.3.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ed203b9ba68b242c62b3fb7480f589dd49829be1edb3fe8fc8b4ffda2dcb8d" +checksum = "0df2f85c8a2abbe3b7d7e748052fdd9b76a0458fdeb16ad4223f5eca78c7c130" dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "serde", + "addr2line", + "cfg-if 0.1.10", + "libc", + "object", + "rustc-demangle", + "serde", ] [[package]] @@ -137,20 +116,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23ce669cd6c8588f79e15cf450314f9638f967fc5770ff1c7c1deb0925ea7cfa" -[[package]] -name = "base64" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = ["byteorder 1.4.3", "safemem"] - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = ["byteorder 1.4.3"] - [[package]] name = "base64" version = "0.11.0" @@ -171,29 +136,12 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] name = "bincode" -version = "1.3.3" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = ["serde"] - -[[package]] -name = "bindgen" -version = "0.55.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b13ce559e6433d360c26305643803cb52cfbabbc2b9c47ce04a58493dfb443" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" dependencies = [ - "bitflags", - "cexpr", - "cfg-if 0.1.10", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "proc-macro2 1.0.27", - "quote 1.0.9", - "regex", - "rustc-hash", - "shlex", + "byteorder 1.3.4", + "serde", ] [[package]] @@ -202,26 +150,19 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = ["either", "radium"] - [[package]] name = "blake3" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3" +checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" dependencies = [ - 
"arrayref", - "arrayvec", - "cc", - "cfg-if 0.1.10", - "constant_time_eq", - "crypto-mac 0.8.0", - "digest 0.9.0", + "arrayref", + "arrayvec", + "cc", + "cfg-if 0.1.10", + "constant_time_eq", + "crypto-mac 0.8.0", + "digest 0.9.0", ] [[package]] @@ -230,10 +171,10 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder 1.4.3", - "generic-array 0.12.4", + "block-padding 0.1.5", + "byte-tools", + "byteorder 1.3.4", + "generic-array 0.12.3", ] [[package]] @@ -241,14 +182,19 @@ name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = ["block-padding 0.2.1", "generic-array 0.14.4"] +dependencies = [ + "block-padding 0.2.1", + "generic-array 0.14.3", +] [[package]] name = "block-padding" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = ["byte-tools"] +dependencies = [ + "byte-tools", +] [[package]] name = "block-padding" @@ -258,37 +204,48 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "borsh" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a7111f797cc721407885a323fb071636aee57f750b1a4ddc27397eba168a74" -dependencies = ["borsh-derive", "hashbrown"] +checksum = "a5a26c53ddf60281f18e7a29b20db7ba3db82a9d81b9650bfaa02d646f50d364" +dependencies = [ + "borsh-derive", + "hashbrown", +] [[package]] name = "borsh-derive" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307f3740906bac2c118a8122fe22681232b244f1369273e45f1156b45c43d2dd" +checksum = 
"b637a47728b78a78cd7f4b85bf06d71ef4221840e059a38f048be2422bf673b2" dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate", - "proc-macro2 1.0.27", - "syn 1.0.72", + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate", + "proc-macro2 1.0.24", + "syn 1.0.60", ] [[package]] name = "borsh-derive-internal" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2104c73179359431cc98e016998f2f23bc7a05bc53e79741bcba705f30047bc" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "d813fa25eb0bed78c36492cff4415f38c760d6de833d255ba9095bd8ebb7d725" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "borsh-schema-derive-internal" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae29eb8418fcd46f723f8691a2ac06857d31179d33d2f2d91eb13967de97c728" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "dcf78ee4a98c8cb9eba1bac3d3e2a1ea3d7673c719ce691e67b5cbafc472d3b7" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "bs58" @@ -296,31 +253,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" -[[package]] -name = "bstr" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" -dependencies = ["memchr"] - [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" [[package]] name = "bv" version = 
"0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8834bb1d8ee5dc048ee3124f2c7c1afcc6bc9aed03f11e9dfd8c69470a5db340" -dependencies = ["feature-probe", "serde"] - -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" +dependencies = [ + "feature-probe", + "serde", +] [[package]] name = "byte-tools" @@ -336,28 +283,26 @@ checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = ["byteorder 1.4.3", "either", "iovec"] - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +dependencies = [ + "byteorder 1.3.4", + "either", + "iovec", +] [[package]] name = "bytes" -version = "0.6.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0dcbc35f504eb6fc275a6d20e4ebcda18cf50d40ba6fabff8c711fa16cb3b16" +checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" [[package]] name = "bytes" @@ -370,28 +315,31 @@ name = "bzip2" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42b7c3cbf0fa9c1b82308d57191728ca0256cb821220f4e2fd410a72ade26e3b" -dependencies = ["bzip2-sys", "libc"] +dependencies = [ + "bzip2-sys", + "libc", +] [[package]] name = 
"bzip2-sys" -version = "0.1.10+1.0.8" +version = "0.1.9+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17fa3d1ac1ca21c5c4e36a97f3c3eb25084576f6fc47bf0139c1123434216c6c" -dependencies = ["cc", "libc", "pkg-config"] +checksum = "ad3b39a260062fca31f7b0b12f207e8f2590a67d32ec7d59c20484b07ea7285e" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "cc" version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e450b8da92aa6f274e7c6437692f9f2ce6d701fb73bacfcf87897b3f89a4c20e" -dependencies = ["jobserver", "num_cpus"] - -[[package]] -name = "cexpr" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" -dependencies = ["nom"] +dependencies = [ + "jobserver", + "num_cpus", +] [[package]] name = "cfg-if" @@ -411,34 +359,27 @@ version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ - "libc", - "num-integer", - "num-traits", - "serde", - "time", - "winapi 0.3.9", + "libc", + "num-integer", + "num-traits", + "serde", + "time", + "winapi 0.3.8", ] -[[package]] -name = "clang-sys" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" -dependencies = ["glob", "libc", "libloading 0.7.0"] - [[package]] name = "clap" version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ - "ansi_term", - "atty", - "bitflags", - "strsim", - "textwrap", - "unicode-width", - "vec_map", + "ansi_term", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", ] [[package]] @@ -446,21 +387,22 @@ name = "cloudabi" version = 
"0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = ["bitflags"] - -[[package]] -name = "colored" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4ffc801dacf156c5854b9df4f425a626539c3a6ef7893cc0c5084a23f0b6c59" -dependencies = ["atty", "lazy_static", "winapi 0.3.9"] +dependencies = [ + "bitflags", +] [[package]] name = "combine" version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" -dependencies = ["ascii", "byteorder 1.4.3", "either", "memchr", "unreachable"] +dependencies = [ + "ascii", + "byteorder 1.3.4", + "either", + "memchr", + "unreachable", +] [[package]] name = "console" @@ -468,50 +410,54 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c0994e656bba7b922d8dd1245db90672ffb701e684e45be58f20719d69abc5a" dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "regex", - "terminal_size", - "termios", - "unicode-width", - "winapi 0.3.9", - "winapi-util", + "encode_unicode", + "lazy_static", + "libc", + "regex", + "terminal_size", + "termios", + "unicode-width", + "winapi 0.3.8", + "winapi-util", ] [[package]] name = "console" -version = "0.14.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" +checksum = "a50aab2529019abfabfa93f1e6c41ef392f91fbf179b347a7e96abb524884a08" dependencies = [ - "encode_unicode", - "lazy_static", - "libc", - "regex", - "terminal_size", - "unicode-width", - "winapi 0.3.9", + "encode_unicode", + "lazy_static", + "libc", + "regex", + "terminal_size", + "unicode-width", + "winapi 0.3.8", + "winapi-util", ] [[package]] -name = "constant_time_eq" -version = "0.1.5" +name = "const_fn" +version = 
"0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" [[package]] -name = "convert_case" -version = "0.4.0" +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "core-foundation" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" -dependencies = ["core-foundation-sys", "libc"] +dependencies = [ + "core-foundation-sys", + "libc", +] [[package]] name = "core-foundation-sys" @@ -520,32 +466,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" [[package]] -name = "cpufeatures" -version = "0.1.4" +name = "cpuid-bool" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" -dependencies = ["libc"] +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" -dependencies = ["cfg-if 1.0.0"] +checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +dependencies = [ + "cfg-if 0.1.10", +] [[package]] name = "crossbeam-channel" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = 
["crossbeam-utils 0.7.2", "maybe-uninit"] +dependencies = [ + "crossbeam-utils 0.7.2", + "maybe-uninit", +] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" -dependencies = ["cfg-if 1.0.0", "crossbeam-utils 0.8.4"] +checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.1", +] [[package]] name = "crossbeam-deque" @@ -553,9 +506,9 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -564,9 +517,9 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.4", - "crossbeam-utils 0.8.4", + "cfg-if 1.0.0", + "crossbeam-epoch 0.9.1", + "crossbeam-utils 0.8.1", ] [[package]] @@ -575,26 +528,27 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 1.0.1", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + "maybe-uninit", + "memoffset 0.5.4", + "scopeguard", ] [[package]] name = "crossbeam-epoch" -version = "0.9.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52fb27eab85b17fbb9f6fd667089e07d6a2eb8743d02639ee7f6a7a7729c9c94" +checksum = 
"a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.4", - "lazy_static", - "memoffset 0.6.3", - "scopeguard", + "cfg-if 1.0.0", + "const_fn", + "crossbeam-utils 0.8.1", + "lazy_static", + "memoffset 0.6.1", + "scopeguard", ] [[package]] @@ -602,21 +556,33 @@ name = "crossbeam-queue" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = ["cfg-if 0.1.10", "crossbeam-utils 0.7.2", "maybe-uninit"] +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "maybe-uninit", +] [[package]] name = "crossbeam-utils" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = ["autocfg 1.0.1", "cfg-if 0.1.10", "lazy_static"] +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] [[package]] name = "crossbeam-utils" -version = "0.8.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4feb231f0d4d6af81aed15928e58ecf5816aa62a2393e2c82f46973e92a9a278" -dependencies = ["autocfg 1.0.1", "cfg-if 1.0.0", "lazy_static"] +checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", + "lazy_static", +] [[package]] name = "crunchy" @@ -629,46 +595,65 @@ name = "crypto-mac" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = ["generic-array 0.12.4", "subtle 1.0.0"] +dependencies = [ + "generic-array 0.12.3", + "subtle 1.0.0", +] [[package]] name = "crypto-mac" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" 
-dependencies = ["generic-array 0.14.4", "subtle 2.4.0"] +dependencies = [ + "generic-array 0.14.3", + "subtle 2.2.2", +] + +[[package]] +name = "crypto-mac" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58bcd97a54c7ca5ce2f6eb16f6bede5b0ab5f0055fedc17d2f0b4466e21671ca" +dependencies = [ + "generic-array 0.14.3", + "subtle 2.2.2", +] [[package]] name = "crypto-mac" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" -dependencies = ["generic-array 0.14.4", "subtle 2.4.0"] +dependencies = [ + "generic-array 0.14.3", + "subtle 2.2.2", +] [[package]] name = "curve25519-dalek" -version = "2.1.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" +checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" dependencies = [ - "byteorder 1.4.3", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", + "byteorder 1.3.4", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle 2.2.2", + "zeroize", ] [[package]] name = "curve25519-dalek" -version = "3.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" +checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" dependencies = [ - "byteorder 1.4.3", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", + "byteorder 1.3.4", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.2.2", + "zeroize", ] [[package]] @@ -676,286 +661,176 @@ name = "dashmap" version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" -dependencies = ["cfg-if 1.0.0", "num_cpus", "rayon"] - -[[package]] -name = 
"derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +dependencies = [ + "cfg-if 1.0.0", + "num_cpus", + "rayon", +] [[package]] -name = "derive_more" -version = "0.99.14" +name = "derivation-path" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" +checksum = "193388a8c8c75a490b604ff61775e236541b8975e98e5ca1f6ea97d122b7e2db" dependencies = [ - "convert_case", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "failure", ] [[package]] -name = "derive_utils" -version = "0.11.2" +name = "derivative" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "eaed5874effa6cde088c644ddcdcb4ffd1511391c5be4fdd7a5ccd02c7e4a183" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "dialoguer" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4aa86af7b19b40ef9cbef761ed411a49f0afa06b7b6dcd3dfe2f96a3c546138" -dependencies = ["console 0.11.3", "lazy_static", "tempfile"] +dependencies = [ + "console 0.11.3", + "lazy_static", + "tempfile", +] [[package]] name = "digest" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = ["generic-array 0.12.4"] +dependencies = [ + "generic-array 0.12.3", +] [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" 
-dependencies = ["generic-array 0.14.4"] +dependencies = [ + "generic-array 0.14.3", +] [[package]] name = "dir-diff" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" -dependencies = ["walkdir"] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "ed25519" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" -dependencies = ["serde", "signature"] - -[[package]] -name = "ed25519-dalek" -version = "1.0.0-pre.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" dependencies = [ - "curve25519-dalek 2.1.2", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.8.2", - "zeroize", + "walkdir", ] [[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "elf" -version = "0.0.10" +name = "dirs-next" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4841de15dbe0e49b9b62a417589299e3be0d557e0900d36acb87e6dae47197f5" -dependencies = ["byteorder 0.5.3"] +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] [[package]] -name = "encode_unicode" -version = "0.3.6" +name = "dirs-sys-next" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = 
"4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi 0.3.8", +] [[package]] -name = "encoding_rs" -version = "0.8.28" +name = "dtoa" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" -dependencies = ["cfg-if 1.0.0"] +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] -name = "env_logger" -version = "0.8.3" +name = "ed25519" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" -dependencies = ["atty", "humantime", "log 0.4.14", "regex", "termcolor"] +checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" +dependencies = [ + "serde", + "signature", +] [[package]] -name = "ethabi" -version = "13.0.0" +name = "ed25519-dalek" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d4e679d6864bc26210feb5cf044e245741cd9d7701b35c00440a6e84d61399" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "anyhow", - "ethereum-types", - "hex", - "serde", - "serde_json", - "sha3 0.9.1", - "thiserror", - "uint", + "curve25519-dalek 3.0.0", + "ed25519", + "rand 0.7.3", + "serde", + "serde_bytes", + "sha2 0.9.2", + "zeroize", ] [[package]] -name = "ethbloom" -version = "0.10.0" +name = "ed25519-dalek-bip32" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a621dcebea74f2a6f2002d0a885c81ccf6cbdf86760183316a7722b5707ca4" +checksum = "057f328f31294b5ab432e6c39642f54afd1531677d6d4ba2905932844cc242f3" dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", + "derivation-path", + "ed25519-dalek", + "failure", + "hmac 0.9.0", + "sha2 0.9.2", ] [[package]] -name = 
"ethbloom" -version = "0.11.0" +name = "either" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779864b9c7f7ead1f092972c3257496c6a84b46dba2ce131dd8a282cb2cc5972" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] +checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" [[package]] -name = "ethereum" -version = "0.6.0" +name = "elf" +version = "0.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8603f637f22e3ab9aff8466b37850cd7ea2ae52034b07e41fdc82f1f68dfa2c" +checksum = "4841de15dbe0e49b9b62a417589299e3be0d557e0900d36acb87e6dae47197f5" dependencies = [ - "ethereum-types", - "hash-db", - "hash256-std-hasher", - "parity-scale-codec", - "rlp", - "rlp-derive", - "serde", - "sha3 0.9.1", - "triehash", + "byteorder 0.5.3", ] [[package]] -name = "ethereum-types" -version = "0.10.0" +name = "encode_unicode" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05dc5f0df4915fa6dff7f975a8366ecfaaa8959c74235469495153e7bb1b280e" -dependencies = [ - "ethbloom 0.10.0", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] -name = "evm" -version = "0.23.0" -source = "git+https://github.com/velas/evm?branch=evm-estimate-dontrecord-l64#e60bf6c62197f3486f31a263c74a058479d923d8" +name = "encoding_rs" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" dependencies = [ - "ethereum", - "evm-core", - "evm-gasometer", - "evm-runtime", - "log 0.4.14", - "parity-scale-codec", - "primitive-types", - "rlp", - "serde", - "sha3 0.8.2", + "cfg-if 0.1.10", ] [[package]] -name = "evm-core" -version = "0.23.0" -source = 
"git+https://github.com/velas/evm?branch=evm-estimate-dontrecord-l64#e60bf6c62197f3486f31a263c74a058479d923d8" -dependencies = ["parity-scale-codec", "primitive-types", "serde"] - -[[package]] -name = "evm-gasometer" -version = "0.23.0" -source = "git+https://github.com/velas/evm?branch=evm-estimate-dontrecord-l64#e60bf6c62197f3486f31a263c74a058479d923d8" -dependencies = ["evm-core", "evm-runtime", "primitive-types"] - -[[package]] -name = "evm-rpc" -version = "0.1.0" -dependencies = [ - "anyhow", - "derive_more", - "ethabi", - "ethbloom 0.11.0", - "evm-state", - "hex", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-http-server", - "jsonrpc-pubsub", - "primitive-types", - "rlp", - "rustc-hex", - "secp256k1", - "serde", - "serde_json", - "snafu", - "uint", -] - -[[package]] -name = "evm-runtime" -version = "0.23.0" -source = "git+https://github.com/velas/evm?branch=evm-estimate-dontrecord-l64#e60bf6c62197f3486f31a263c74a058479d923d8" -dependencies = ["evm-core", "primitive-types", "sha3 0.8.2"] - -[[package]] -name = "evm-state" -version = "0.1.0" +name = "env_logger" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" dependencies = [ - "anyhow", - "auto_enums", - "bincode", - "bytes 0.6.0", - "derive_more", - "ethbloom 0.11.0", - "evm", - "fixed-hash", - "hex", - "impl-rlp", - "itertools 0.10.0", - "keccak-hash", - "lazy_static", - "log 0.4.14", - "primitive-types", - "rand 0.6.1", - "rand 0.8.3", - "rlp", - "rocksdb", - "secp256k1", - "serde", - "sha3 0.9.1", - "simple_logger", - "snafu", - "tempfile", - "thiserror", - "triedb", + "atty", + "humantime", + "log", + "regex", + "termcolor", ] [[package]] @@ -963,7 +838,10 @@ name = "failure" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = ["backtrace", 
"failure_derive"] +dependencies = [ + "backtrace", + "failure_derive", +] [[package]] name = "failure_derive" @@ -971,10 +849,10 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", - "synstructure", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", + "synstructure", ] [[package]] @@ -991,29 +869,27 @@ checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" [[package]] name = "filetime" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d34cfa13a63ae058bfa601fe9e313bbdb3746427c1459185464ce0fcf62e1e8" -dependencies = ["cfg-if 1.0.0", "libc", "redox_syscall 0.2.8", "winapi 0.3.9"] - -[[package]] -name = "fixed-hash" -version = "0.7.0" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +checksum = "affc17579b132fc2461adf7c575cc6e8b134ebca52c51f5411388965227dc695" dependencies = [ - "byteorder 1.4.3", - "rand 0.8.3", - "rustc-hex", - "static_assertions", + "cfg-if 0.1.10", + "libc", + "redox_syscall 0.1.56", + "winapi 0.3.8", ] [[package]] name = "flate2" -version = "1.0.20" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" -dependencies = ["cfg-if 1.0.0", "crc32fast", "libc", "miniz_oxide"] +checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" +dependencies = [ + "cfg-if 0.1.10", + "crc32fast", + "libc", + "miniz_oxide", +] [[package]] name = "fnv" @@ -1026,7 +902,9 @@ name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies 
= ["foreign-types-shared"] +dependencies = [ + "foreign-types-shared", +] [[package]] name = "foreign-types-shared" @@ -1039,7 +917,10 @@ name = "form_urlencoded" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" -dependencies = ["matches", "percent-encoding 2.1.0"] +dependencies = [ + "matches", + "percent-encoding", +] [[package]] name = "fs_extra" @@ -1052,7 +933,10 @@ name = "fuchsia-zircon" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = ["bitflags", "fuchsia-zircon-sys"] +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] [[package]] name = "fuchsia-zircon-sys" @@ -1062,120 +946,162 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.1.31" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" +checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" [[package]] -name = "futures-channel" -version = "0.3.15" +name = "futures" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" -dependencies = ["futures-core"] - +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +dependencies = [ + "futures-core", + "futures-sink", +] + [[package]] 
name = "futures-core" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" [[package]] -name = "futures-cpupool" -version = "0.1.8" +name = "futures-executor" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = ["futures", "num_cpus"] +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" dependencies = [ - "autocfg 1.0.1", - "proc-macro-hack", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "proc-macro-hack", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", ] [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" +dependencies = [ + "once_cell", +] [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" dependencies = [ - "autocfg 1.0.1", - "futures-core", - "futures-io", - "futures-macro", - "futures-task", - "memchr", - "pin-project-lite 0.2.6", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite 0.2.4", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", ] [[package]] name = "generic-array" -version = "0.12.4" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = ["typenum"] +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" -dependencies = ["serde", "typenum", "version_check 0.9.3"] +checksum = "60fb4bb6bba52f78a471264d9a3b7d026cc0af47b22cd2cffbc0b787ca003e63" +dependencies = [ + "serde", + "typenum", + "version_check", +] [[package]] name = "gethostname" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e692e296bfac1d2533ef168d0b60ff5897b8b70a4009276834014dd8924cc028" -dependencies = ["libc", "winapi 0.3.9"] +dependencies = [ + "libc", + "winapi 0.3.8", +] [[package]] name 
= "getrandom" -version = "0.1.16" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = ["cfg-if 1.0.0", "libc", "wasi 0.9.0+wasi-snapshot-preview1"] +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" -dependencies = ["cfg-if 1.0.0", "libc", "wasi 0.10.2+wasi-snapshot-preview1"] +checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.1+wasi-snapshot-preview1", +] [[package]] name = "gimli" -version = "0.23.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" [[package]] name = "glob" @@ -1183,159 +1109,157 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -[[package]] -name = "globset" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" -dependencies = ["aho-corasick", "bstr", "fnv", "log 0.4.14", "regex"] - [[package]] name = "goblin" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669cdc3826f69a51d3f8fc3f86de81c2378110254f678b8407977736122057a4" -dependencies = ["log 0.4.14", "plain", "scroll"] - -[[package]] -name = "h2" -version = "0.1.26" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +checksum = "c69552f48b18aa6102ce0c82dd9bc9d3f8af5fc0a5797069b1b466b90570e39c" dependencies = [ - "byteorder 1.4.3", - "bytes 0.4.12", - "fnv", - "futures", - "http 0.1.21", - "indexmap", - "log 0.4.14", - "slab", - "string", - "tokio-io", + "log", + "plain", + "scroll", ] [[package]] name = "h2" -version = "0.2.7" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +checksum = "d832b01df74254fe364568d6ddc294443f61cbec82816b60904303af87efae78" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.4", - "indexmap", - "slab", - "tokio 0.2.25", - "tokio-util", - "tracing", - "tracing-futures", + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.1.1", + "tokio-util", + "tracing", ] -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = ["crunchy"] - [[package]] name = "hash32" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4041af86e63ac4298ce40e5cca669066e75b6f1aa3390fe2561ffa5e1d9f4cc" -dependencies = ["byteorder 1.4.3"] +dependencies = [ + "byteorder 1.3.4", +] [[package]] name = "hashbrown" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = ["ahash"] +dependencies = [ 
+ "ahash", +] [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" -dependencies = ["libc"] +checksum = "91780f809e750b0a89f5544be56617ff6b1227ee485bcb06ebe10cdf89bd3b71" +dependencies = [ + "libc", +] [[package]] name = "hex" -version = "0.4.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" [[package]] name = "hidapi" -version = "1.2.6" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e07da7e8614133e88b3a93b7352eb3729e3ccd82d5ab661adf23bef1761bf8" -dependencies = ["cc", "libc", "pkg-config"] +checksum = "76c352a18370f7e7e47bcbfcbdc5432b8c80c705b5d751a25232c659fcf5c775" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "hmac" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = ["crypto-mac 0.7.0", "digest 0.8.1"] +dependencies = [ + "crypto-mac 0.7.0", + "digest 0.8.1", +] [[package]] name = "hmac" -version = "0.10.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = ["crypto-mac 0.10.0", "digest 0.9.0"] +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] [[package]] -name = "hmac-drbg" -version = "0.2.0" +name = "hmac" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = ["digest 
0.8.1", "generic-array 0.12.4", "hmac 0.7.1"] +checksum = "deae6d9dbb35ec2c502d62b8f7b1c000a0822c3b0794ba36b3149c0a1c840dff" +dependencies = [ + "crypto-mac 0.9.1", + "digest 0.9.0", +] [[package]] -name = "http" -version = "0.1.21" +name = "hmac" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -dependencies = ["bytes 0.4.12", "fnv", "itoa"] +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +dependencies = [ + "crypto-mac 0.10.0", + "digest 0.9.0", +] [[package]] -name = "http" -version = "0.2.4" +name = "hmac-drbg" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" -dependencies = ["bytes 1.0.1", "fnv", "itoa"] +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = [ + "digest 0.8.1", + "generic-array 0.12.3", + "hmac 0.7.1", +] [[package]] -name = "http-body" -version = "0.1.0" +name = "http" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" -dependencies = ["bytes 0.4.12", "futures", "http 0.1.21", "tokio-buf"] +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +dependencies = [ + "bytes 0.5.4", + "fnv", + "itoa", +] [[package]] name = "http-body" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = ["bytes 0.5.6", "http 0.2.4"] +checksum = "2861bd27ee074e5ee891e8b539837a9430012e249d7f0ca2d795650f579c1994" +dependencies = [ + "bytes 1.0.1", + "http", +] [[package]] name = "httparse" -version = "1.4.1" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" [[package]] name = "httpdate" @@ -1345,168 +1269,108 @@ checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" [[package]] name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.10.16" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" -dependencies = [ - "base64 0.9.3", - "httparse", - "language-tags", - "log 0.3.9", - "mime 0.2.6", - "num_cpus", - "time", - "traitobject", - "typeable", - "unicase 1.4.2", - "url 1.7.2", -] - -[[package]] -name = "hyper" -version = "0.12.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" -dependencies = [ - "bytes 0.4.12", - "futures", - "futures-cpupool", - "h2 0.1.26", - "http 0.1.21", - "http-body 0.1.0", - "httparse", - "iovec", - "itoa", - "log 0.4.14", - "net2", - "rustc_version", - "time", - "tokio 0.1.22", - "tokio-buf", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "want 0.2.0", -] +checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.13.10" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" +checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http 0.2.4", - "http-body 0.3.1", - "httparse", - 
"httpdate", - "itoa", - "pin-project", - "socket2", - "tokio 0.2.25", - "tower-service", - "tracing", - "want 0.3.0", + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project", + "socket2", + "tokio 1.1.1", + "tower-service", + "tracing", + "want", ] [[package]] name = "hyper-rustls" -version = "0.21.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ - "bytes 0.5.6", - "futures-util", - "hyper 0.13.10", - "log 0.4.14", - "rustls", - "tokio 0.2.25", - "tokio-rustls", - "webpki", + "futures-util", + "hyper", + "log", + "rustls", + "tokio 1.1.1", + "tokio-rustls", + "webpki", ] [[package]] name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = ["matches", "unicode-bidi", "unicode-normalization"] - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = ["matches", "unicode-bidi", "unicode-normalization"] - -[[package]] -name = "impl-codec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" -dependencies = ["parity-scale-codec"] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = ["rlp"] - -[[package]] -name = "impl-serde" -version = "0.3.1" +version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" -dependencies = ["serde"] +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] [[package]] name = "indexmap" version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" -dependencies = ["autocfg 1.0.1", "hashbrown"] +dependencies = [ + "autocfg", + "hashbrown", +] [[package]] name = "indicatif" version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" -dependencies = ["console 0.14.1", "lazy_static", "number_prefix", "regex"] +dependencies = [ + "console 0.13.0", + "lazy_static", + "number_prefix", + "regex", +] [[package]] name = "input_buffer" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" -dependencies = ["bytes 0.5.6"] +dependencies = [ + "bytes 0.5.4", +] [[package]] name = "instant" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = ["cfg-if 1.0.0"] +dependencies = [ + "cfg-if 1.0.0", +] [[package]] name = "iovec" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = ["libc"] +dependencies = [ + "libc", +] [[package]] name = "ipnet" @@ -1519,142 +1383,86 @@ name = "itertools" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = ["either"] 
+dependencies = [ + "either", +] [[package]] name = "itertools" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" -dependencies = ["either"] +dependencies = [ + "either", +] [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" [[package]] name = "jemalloc-ctl" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c502a5ff9dd2924f1ed32ba96e3b65735d837b4bfd978d3161b1702e66aca4b7" -dependencies = ["jemalloc-sys", "libc", "paste"] +dependencies = [ + "jemalloc-sys", + "libc", + "paste", +] [[package]] name = "jemalloc-sys" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45" -dependencies = ["cc", "fs_extra", "libc"] +dependencies = [ + "cc", + "fs_extra", + "libc", +] [[package]] name = "jemallocator" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69" -dependencies = ["jemalloc-sys", "libc"] - -[[package]] -name = "jobserver" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd" -dependencies = ["libc"] - -[[package]] -name = "js-sys" -version = "0.3.51" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" -dependencies = ["wasm-bindgen"] - -[[package]] -name = "jsonrpc-client-transports" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" -dependencies = [ - "failure", - "futures", - "jsonrpc-core", - "jsonrpc-pubsub", - "log 0.4.14", - "serde", - "serde_json", - "tokio 0.1.22", - "url 1.7.2", - "websocket", -] - -[[package]] -name = "jsonrpc-core" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" -dependencies = ["futures", "log 0.4.14", "serde", "serde_derive", "serde_json"] - -[[package]] -name = "jsonrpc-core-client" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" -dependencies = ["jsonrpc-client-transports"] - -[[package]] -name = "jsonrpc-derive" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "jemalloc-sys", + "libc", ] [[package]] -name = "jsonrpc-http-server" -version = "15.1.0" +name = "jobserver" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" dependencies = [ - "hyper 0.12.36", - "jsonrpc-core", - "jsonrpc-server-utils", - "log 0.4.14", - "net2", - "parking_lot 0.10.2", - "unicase 2.6.0", + "libc", ] [[package]] -name = "jsonrpc-pubsub" -version = "15.1.0" +name = "js-sys" +version = "0.3.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" +checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" dependencies = [ - "jsonrpc-core", - "log 0.4.14", - "parking_lot 
0.10.2", - "rand 0.7.3", - "serde", + "wasm-bindgen", ] [[package]] -name = "jsonrpc-server-utils" -version = "15.1.0" +name = "jsonrpc-core" +version = "17.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" +checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" dependencies = [ - "bytes 0.4.12", - "globset", - "jsonrpc-core", - "lazy_static", - "log 0.4.14", - "tokio 0.1.22", - "tokio-codec", - "unicase 2.6.0", + "futures 0.3.12", + "log", + "serde", + "serde_derive", + "serde_json", ] [[package]] @@ -1663,71 +1471,39 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" -[[package]] -name = "keccak-hash" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3208a3f8f6fad9a5c626276812ebdd54d3b5f79cb59e34c2fdcdf03837900072" -dependencies = ["primitive-types", "tiny-keccak"] - -[[package]] -name = "keccak-hasher" -version = "0.15.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" -dependencies = ["hash-db", "hash256-std-hasher", "tiny-keccak"] - [[package]] name = "kernel32-sys" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = ["winapi 0.2.8", "winapi-build"] - -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = ["spin"] - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +dependencies = [ + "spin", +] [[package]] name = "libc" -version = "0.2.95" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] name = "libloading" -version = "0.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883" -dependencies = ["cfg-if 1.0.0", "winapi 0.3.9"] - -[[package]] -name = "libloading" -version = "0.7.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" -dependencies = ["cfg-if 1.0.0", "winapi 0.3.9"] - -[[package]] -name = "librocksdb-sys" -version = "6.15.4" -source = "git+https://github.com/rust-rocksdb/rust-rocksdb?rev=39b877b#39b877b41aac99f6accee814410c478878a79454" -dependencies = ["bindgen", "cc", "glob", "libc"] +checksum = "2cadb8e769f070c45df05c78c7520eb4cd17061d4ab262e43cfc68b4d00ac71c" +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "libsecp256k1" @@ -1735,14 +1511,14 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", + "arrayref", + "crunchy", + "digest 0.8.1", + "hmac-drbg", + "rand 0.7.3", + "sha2 0.8.2", + "subtle 2.2.2", + "typenum", ] [[package]] @@ -1756,28 +1532,27 @@ name = 
"lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = ["scopeguard"] +dependencies = [ + "scopeguard", +] [[package]] name = "lock_api" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" -dependencies = ["scopeguard"] - -[[package]] -name = "log" -version = "0.3.9" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = ["log 0.4.14"] +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +dependencies = [ + "scopeguard", +] [[package]] name = "log" -version = "0.4.14" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = ["cfg-if 1.0.0"] +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if 0.1.10", +] [[package]] name = "matches" @@ -1793,37 +1568,36 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.4.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memmap2" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b70ca2a6103ac8b665dc150b142ef0e4e89df640c9e6cf295d189c3caebe5a" -dependencies = ["libc"] +dependencies = [ + "libc", +] [[package]] name = "memoffset" -version = "0.5.6" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = ["autocfg 1.0.1"] +checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +dependencies = [ + "autocfg", +] [[package]] name = "memoffset" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" -dependencies = ["autocfg 1.0.1"] - -[[package]] -name = "mime" -version = "0.2.6" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -dependencies = ["log 0.3.9"] +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +dependencies = [ + "autocfg", +] [[package]] name = "mime" @@ -1831,66 +1605,79 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" -dependencies = ["mime 0.3.16", "unicase 2.6.0"] - [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = ["adler", "autocfg 1.0.1"] +checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +dependencies = [ + "adler32", +] [[package]] name = "mio" -version = "0.6.23" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - 
"iovec", - "kernel32-sys", - "libc", - "log 0.4.14", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", + "cfg-if 0.1.10", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow 0.2.2", + "net2", + "slab", + "winapi 0.2.8", ] [[package]] name = "mio" -version = "0.7.11" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" -dependencies = ["libc", "log 0.4.14", "miow 0.3.7", "ntapi", "winapi 0.3.9"] +checksum = "e50ae3f04d169fcc9bde0b547d1c205219b7157e07ded9c5aff03e0637cb3ed7" +dependencies = [ + "libc", + "log", + "miow 0.3.6", + "ntapi", + "winapi 0.3.8", +] [[package]] name = "mio-uds" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = ["iovec", "libc", "mio 0.6.23"] +dependencies = [ + "iovec", + "libc", + "mio 0.6.22", +] [[package]] name = "miow" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = ["kernel32-sys", "net2", "winapi 0.2.8", "ws2_32-sys"] +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] [[package]] name = "miow" -version = "0.3.7" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = ["winapi 0.3.9"] +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2", + "winapi 0.3.8", +] [[package]] name = "native-tls" @@ -1898,16 +1685,16 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ - "lazy_static", - "libc", - 
"log 0.4.14", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] @@ -1915,70 +1702,93 @@ name = "net2" version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = ["cfg-if 0.1.10", "libc", "winapi 0.3.9"] +dependencies = [ + "cfg-if 0.1.10", + "libc", + "winapi 0.3.8", +] [[package]] name = "nix" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" -dependencies = ["bitflags", "cc", "cfg-if 1.0.0", "libc"] - -[[package]] -name = "nom" -version = "5.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = ["memchr", "version_check 0.9.3"] +dependencies = [ + "bitflags", + "cc", + "cfg-if 1.0.0", + "libc", +] [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = ["winapi 0.3.9"] +checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2" +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "num-derive" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2" -dependencies = ["proc-macro2 0.4.30", "quote 0.6.13", "syn 0.15.44"] +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", +] [[package]] name = "num-derive" -version = "0.3.3" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "0c8b15b261814f992e33760b1fca9fe8b693d8a65299f20c9901688636cfb746" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = ["autocfg 1.0.1", "num-traits"] +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +dependencies = [ + "autocfg", + "num-traits", +] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = ["autocfg 1.0.1"] +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +dependencies = [ + "autocfg", +] [[package]] name = "num_cpus" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = ["hermit-abi", "libc"] +dependencies = [ + "hermit-abi", + "libc", +] [[package]] name = "num_enum" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "226b45a5c2ac4dd696ed30fa6b94b057ad909c7b7fc2e0d0808192bced894066" -dependencies = ["derivative", "num_enum_derive"] +dependencies = [ + "derivative", + "num_enum_derive", +] [[package]] name = "num_enum_derive" @@ -1986,10 +1796,10 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c0fd9eba1d5db0994a239e09c1be402d35622277e35468ba891aa5e3188ce7e" dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.27", - "quote 1.0.9", - 
"syn 1.0.72", + "proc-macro-crate", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", ] [[package]] @@ -2000,16 +1810,18 @@ checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" [[package]] name = "object" -version = "0.23.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2" [[package]] name = "once_cell" -version = "1.7.2" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" -dependencies = ["parking_lot 0.11.1"] +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +dependencies = [ + "parking_lot 0.11.1", +] [[package]] name = "opaque-debug" @@ -2025,68 +1837,57 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.34" +version = "0.10.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7830286ad6a3973c0f1d9b73738f69c76b739301d0229c4b96501695cbe4c8" +checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-sys", + "bitflags", + "cfg-if 1.0.0", + "foreign-types", + "lazy_static", + "libc", + "openssl-sys", ] [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.63" +version = "0.9.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6b0d6fb7d80f877617dfcb014e605e2b5ab2fb0afdf27935219bb6bd984cb98" -dependencies = ["autocfg 1.0.1", "cc", "libc", "pkg-config", "vcpkg"] +checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "ouroboros" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc04551635026d3ac7bc646698ea1836a85ed2a26b7094fe1d15d8b14854c4a2" -dependencies = ["ouroboros_macro", "stable_deref_trait"] +dependencies = [ + "ouroboros_macro", + "stable_deref_trait", +] [[package]] name = "ouroboros_macro" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cec33dfceabec83cd0e95a5ce9d20e76ab3a5cbfef59659b8c927f69b93ed8ae" -dependencies = ["Inflector", "proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] - -[[package]] -name = "parity-scale-codec" -version = "1.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c41512944b1faff334a5f1b9447611bf4ef40638ccb6328173dacefb338e878c" -dependencies = [ - "proc-macro-crate", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", + "Inflector", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", ] [[package]] @@ -2094,21 +1895,32 @@ name = "parking_lot" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = ["lock_api 0.3.4", "parking_lot_core 0.6.2", "rustc_version"] +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.6.2", + "rustc_version", +] [[package]] name = 
"parking_lot" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = ["lock_api 0.3.4", "parking_lot_core 0.7.2"] +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] [[package]] name = "parking_lot" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = ["instant", "lock_api 0.4.4", "parking_lot_core 0.8.3"] +dependencies = [ + "instant", + "lock_api 0.4.2", + "parking_lot_core 0.8.2", +] [[package]] name = "parking_lot_core" @@ -2116,13 +1928,13 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version", - "smallvec 0.6.14", - "winapi 0.3.9", + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall 0.1.56", + "rustc_version", + "smallvec 0.6.14", + "winapi 0.3.8", ] [[package]] @@ -2131,26 +1943,26 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "smallvec 1.6.1", - "winapi 0.3.9", + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall 0.1.56", + "smallvec 1.6.1", + "winapi 0.3.8", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.8", - "smallvec 1.6.1", - "winapi 
0.3.9", + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.1.56", + "smallvec 1.6.1", + "winapi 0.3.8", ] [[package]] @@ -2158,40 +1970,37 @@ name = "paste" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = ["paste-impl", "proc-macro-hack"] +dependencies = [ + "paste-impl", + "proc-macro-hack", +] [[package]] name = "paste-impl" version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = ["proc-macro-hack"] +dependencies = [ + "proc-macro-hack", +] [[package]] name = "pbkdf2" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = ["byteorder 1.4.3", "crypto-mac 0.7.0"] +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] [[package]] name = "pbkdf2" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3b8c0d71734018084da0c0354193a5edfb81b20d2d57a92c5b154aefc554a4a" -dependencies = ["crypto-mac 0.10.0"] - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +dependencies = [ + "crypto-mac 0.10.0", +] [[package]] name = "percent-encoding" @@ -2204,33 +2013,41 @@ name = "pest" version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = ["ucd-trie"] +dependencies = [ + "ucd-trie", +] [[package]] name = "pin-project" -version = "1.0.7" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" -dependencies = ["pin-project-internal"] +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +dependencies = [ + "pin-project-internal", +] [[package]] name = "pin-project-internal" -version = "1.0.7" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "pin-project-lite" -version = "0.1.12" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" +checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" [[package]] name = "pin-utils" @@ -2240,9 +2057,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" [[package]] 
name = "plain" @@ -2252,23 +2069,18 @@ checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" [[package]] name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "primitive-types" -version = "0.8.0" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3824ae2c5e27160113b9e029a10ec9e3f0237bad8029f69c7724393c9fdefd8" -dependencies = ["fixed-hash", "impl-codec", "impl-rlp", "impl-serde", "uint"] +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro-crate" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = ["toml"] +dependencies = [ + "toml", +] [[package]] name = "proc-macro-hack" @@ -2278,61 +2090,53 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" -version = "0.1.7" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" +checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" [[package]] name = "proc-macro2" version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = ["unicode-xid 0.1.0"] +dependencies = [ + "unicode-xid 0.1.0", +] [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" -dependencies = ["unicode-xid 0.2.2"] +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" 
+dependencies = [ + "unicode-xid 0.2.0", +] [[package]] -name = "quote" -version = "0.6.13" +name = "qstring" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = ["proc-macro2 0.4.30"] +checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" +dependencies = [ + "percent-encoding", +] [[package]] name = "quote" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" -dependencies = ["proc-macro2 1.0.27"] - -[[package]] -name = "radium" -version = "0.3.0" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +dependencies = [ + "proc-macro2 0.4.30", +] [[package]] -name = "rand" -version = "0.6.1" +name = "quote" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae9d223d52ae411a33cf7e54ec6034ec165df296ccd23533d671a28252b6f66a" +checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" dependencies = [ - "cloudabi", - "fuchsia-zircon", - "libc", - "rand_chacha 0.1.1", - "rand_core 0.3.1", - "rand_hc 0.1.0", - "rand_isaac", - "rand_pcg 0.1.2", - "rand_xorshift", - "rustc_version", - "winapi 0.3.9", + "proc-macro2 1.0.24", ] [[package]] @@ -2341,223 +2145,202 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", - "rand_pcg 0.2.1", + "getrandom 0.1.14", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", + "rand_pcg", ] 
[[package]] name = "rand" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" dependencies = [ - "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", ] -[[package]] -name = "rand_chacha" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -dependencies = ["autocfg 0.1.7", "rand_core 0.3.1"] - [[package]] name = "rand_chacha" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = ["ppv-lite86", "rand_core 0.5.1"] +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] [[package]] name = "rand_chacha" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" -dependencies = ["ppv-lite86", "rand_core 0.6.2"] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = ["rand_core 0.4.2"] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.2", +] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = ["getrandom 0.1.16"] +dependencies = [ + 
"getrandom 0.1.14", +] [[package]] name = "rand_core" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" -dependencies = ["getrandom 0.2.3"] - -[[package]] -name = "rand_hc" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -dependencies = ["rand_core 0.3.1"] +dependencies = [ + "getrandom 0.2.1", +] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = ["rand_core 0.5.1"] +dependencies = [ + "rand_core 0.5.1", +] [[package]] name = "rand_hc" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" -dependencies = ["rand_core 0.6.2"] - -[[package]] -name = "rand_isaac" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -dependencies = ["rand_core 0.3.1"] - -[[package]] -name = "rand_pcg" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -dependencies = ["autocfg 0.1.7", "rand_core 0.4.2"] +dependencies = [ + "rand_core 0.6.2", +] [[package]] name = "rand_pcg" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = ["rand_core 0.5.1"] - -[[package]] -name = "rand_xorshift" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -dependencies = 
["rand_core 0.3.1"] +dependencies = [ + "rand_core 0.5.1", +] [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ - "autocfg 1.0.1", - "crossbeam-deque 0.8.0", - "either", - "rayon-core", + "autocfg", + "crossbeam-deque 0.8.0", + "either", + "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ - "crossbeam-channel 0.5.1", - "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.4", - "lazy_static", - "num_cpus", + "crossbeam-channel 0.5.0", + "crossbeam-deque 0.8.0", + "crossbeam-utils 0.8.1", + "lazy_static", + "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.1.57" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" [[package]] name = "redox_syscall" -version = "0.2.8" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "742739e41cd49414de871ea5e549afb7e2a3ac77b589bcbebe8c82fab37147fc" -dependencies = ["bitflags"] +checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +dependencies = [ + "bitflags", +] [[package]] name = "redox_users" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" -dependencies = ["getrandom 0.2.1", "redox_syscall 0.2.4"] +dependencies = [ + "getrandom 
0.2.1", + "redox_syscall 0.2.4", +] [[package]] name = "regex" -version = "1.5.4" +version = "1.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" -dependencies = ["aho-corasick", "memchr", "regex-syntax"] +checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" [[package]] name = "remove_dir_all" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = ["winapi 0.3.9"] +checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "reqwest" -version = "0.10.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" -dependencies = [ - "base64 0.13.0", - "bytes 0.5.6", - "encoding_rs", - "futures-core", - "futures-util", - "http 0.2.4", - "http-body 0.3.1", - "hyper 0.13.10", - "hyper-rustls", - "ipnet", - "js-sys", - "lazy_static", - "log 0.4.14", - "mime 0.3.16", - "mime_guess", - "percent-encoding 2.1.0", - "pin-project-lite 0.2.6", - "rustls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 0.2.25", - "tokio-rustls", - "url 2.2.2", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots", - "winreg", +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +dependencies = [ + "base64 0.13.0", + "bytes 1.0.1", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "hyper-rustls", + "ipnet", + "js-sys", + "lazy_static", + "log", + "mime", + "percent-encoding", + "pin-project-lite 0.2.4", + "rustls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio 1.1.1", + "tokio-rustls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", ] [[package]] @@ -2566,54 +2349,30 @@ version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" dependencies = [ - "cc", - "lazy_static", - "libc", - "spin", - "untrusted", - "web-sys", - "winapi 0.3.9", + "cc", + "lazy_static", + "libc", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.8", ] -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = ["block-buffer 0.9.0", "digest 0.9.0", "opaque-debug 0.3.0"] - -[[package]] -name = "rlp" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8" -dependencies = ["bytes 1.0.1", "rustc-hex"] - -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] - -[[package]] -name = "rocksdb" -version = "0.15.0" -source = "git+https://github.com/rust-rocksdb/rust-rocksdb?rev=39b877b#39b877b41aac99f6accee814410c478878a79454" -dependencies = ["libc", "librocksdb-sys"] - [[package]] name = "rpassword" version = "4.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" -dependencies = ["libc", "winapi 0.3.9"] +dependencies = [ + "libc", + "winapi 0.3.8", +] [[package]] name = "rustc-demangle" -version = "0.1.19" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410f7acf3cb3a44527c5d9546bad4bf4e6c460915d5f9f2fc524498bfe8f70ce" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" [[package]] name = "rustc-hash" @@ -2621,57 +2380,58 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - [[package]] name = "rustc_version" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = ["semver 0.9.0"] +dependencies = [ + "semver 0.9.0", +] [[package]] name = "rustls" -version = "0.18.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" -dependencies = ["base64 0.12.3", "log 0.4.14", "ring", "sct", "webpki"] +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +dependencies = [ + "base64 0.13.0", + "log", + "ring", + "sct", + "webpki", +] [[package]] name = "rustversion" -version = "1.0.5" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" [[package]] name = "ryu" -version = "1.0.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "safemem" -version = "0.3.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = ["winapi-util"] +dependencies = [ + "winapi-util", +] [[package]] name = "schannel" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = ["lazy_static", "winapi 0.3.9"] +dependencies = [ + "lazy_static", + "winapi 0.3.8", +] [[package]] name = "scopeguard" @@ -2681,72 +2441,74 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scroll" -version = "0.10.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" -dependencies = ["scroll_derive"] +checksum = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" +dependencies = [ + "scroll_derive", +] [[package]] name = "scroll_derive" -version = "0.10.5" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "sct" version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -dependencies = ["ring", "untrusted"] - -[[package]] -name = "secp256k1" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6179428c22c73ac0fbb7b5579a56353ce78ba29759b3b8575183336ea74cdfb" -dependencies = ["rand 0.6.1", "secp256k1-sys"] - -[[package]] -name = "secp256k1-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11553d210db090930f4432bea123b31f70bbf693ace14504ea2a35e796c28dd2" -dependencies = ["cc"] +dependencies = [ + "ring", + "untrusted", +] [[package]] name = "security-framework" -version = "2.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +checksum = "c1759c2e3c8580017a484a7ac56d3abc5a6c1feadf88db2f3633f12ae4268c69" dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" -dependencies = ["core-foundation-sys", "libc"] +checksum = "f99b9d5e26d2a71633cc4f2ebae7cc9f874044e0c351a27e17892d76dce5678b" +dependencies = [ + "core-foundation-sys", + "libc", +] [[package]] name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = ["semver-parser 0.7.0"] +dependencies = [ + "semver-parser 0.7.0", +] [[package]] name = "semver" version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = ["semver-parser 0.10.2"] +dependencies = [ + "semver-parser 0.10.2", +] [[package]] name = "semver-parser" @@ -2759,49 +2521,73 @@ name = "semver-parser" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = ["pest"] +dependencies = [ + "pest", +] [[package]] name = "serde" -version = "1.0.126" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" -dependencies = ["serde_derive"] +checksum = "974ef1bd2ad8a507599b336595454081ff68a9599b4890af7643c0c0ed73a62c" +dependencies = [ + "serde_derive", +] [[package]] name = "serde_bytes" -version = "0.11.5" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" -dependencies = ["serde"] +checksum = "3bf487fbf5c6239d7ea2ff8b10cb6b811cd4b5080d1c2aeed1dec18753c06e10" +dependencies = [ + "serde", +] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "8dee1f300f838c8ac340ecb0112b3ac472464fa67e87292bdb3dfc9c49128e17" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" -dependencies = ["itoa", "ryu", "serde"] +checksum = 
"3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +dependencies = [ + "itoa", + "ryu", + "serde", +] [[package]] name = "serde_urlencoded" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" -dependencies = ["form_urlencoded", "itoa", "ryu", "serde"] +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] [[package]] name = "serde_yaml" version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" -dependencies = ["dtoa", "linked-hash-map", "serde", "yaml-rust"] +dependencies = [ + "dtoa", + "linked-hash-map", + "serde", + "yaml-rust", +] [[package]] name = "sha-1" @@ -2809,54 +2595,35 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", ] [[package]] name = "sha2" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - 
"cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha3" -version = "0.8.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", - "keccak", - "opaque-debug 0.2.3", + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] @@ -2865,50 +2632,41 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", ] -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" - [[package]] name = "signal-hook-registry" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" -dependencies = ["libc"] +dependencies = [ + "libc", +] [[package]] name = "signature" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" - -[[package]] -name = "simple_logger" -version = "1.11.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd57f17c093ead1d4a1499dc9acaafdd71240908d64775465543b8d9a9f1d198" -dependencies = ["atty", "chrono", "colored", "log 0.4.14", "winapi 0.3.9"] +checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" [[package]] name = "slab" -version = 
"0.4.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = ["maybe-uninit"] +dependencies = [ + "maybe-uninit", +] [[package]] name = "smallvec" @@ -2916,858 +2674,914 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" -[[package]] -name = "snafu" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab12d3c261b2308b0d80c26fffb58d17eba81a4be97890101f416b478c79ca7" -dependencies = ["doc-comment", "snafu-derive"] - -[[package]] -name = "snafu-derive" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1508efa03c362e23817f96cde18abed596a25219a8b2c66e8db33c03543d315b" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] - [[package]] name = "socket2" -version = "0.3.19" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = ["cfg-if 1.0.0", "libc", "winapi 0.3.9"] +checksum = "2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.1.56", + "winapi 0.3.8", +] [[package]] name = "solana-account-decoder" -version = "1.5.19" -dependencies = [ - "Inflector", - "base64 0.12.3", - "bincode", - "bs58", - "bv", - "lazy_static", - "serde", - "serde_derive", - "serde_json", - "solana-config-program", - "solana-sdk", - "solana-stake-program", - "solana-vote-program", - 
"spl-token", - "thiserror", - "velas-account-program", - "zstd", +version = "1.6.14" +dependencies = [ + "Inflector", + "base64 0.12.3", + "bincode", + "bs58", + "bv", + "lazy_static", + "serde", + "serde_derive", + "serde_json", + "solana-config-program", + "solana-sdk", + "solana-stake-program", + "solana-vote-program", + "spl-token", + "thiserror", + "zstd", ] [[package]] name = "solana-bpf-loader-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "byteorder 1.4.3", - "curve25519-dalek 3.1.0", - "log 0.4.14", - "num-derive 0.3.3", - "num-traits", - "rand_core 0.6.2", - "solana-runtime", - "solana-sdk", - "solana_rbpf", - "thiserror", + "bincode", + "byteorder 1.3.4", + "log", + "num-derive 0.3.0", + "num-traits", + "rand_core 0.6.2", + "sha3", + "solana-measure", + "solana-runtime", + "solana-sdk", + "solana_rbpf", + "thiserror", ] [[package]] name = "solana-bpf-programs" -version = "1.5.19" -dependencies = [ - "bincode", - "byteorder 1.4.3", - "elf", - "itertools 0.10.0", - "miow 0.2.2", - "net2", - "solana-bpf-loader-program", - "solana-cli-output", - "solana-logger 1.5.19", - "solana-measure", - "solana-runtime", - "solana-sdk", - "solana-transaction-status", - "solana_rbpf", - "walkdir", +version = "1.6.14" +dependencies = [ + "bincode", + "byteorder 1.3.4", + "elf", + "itertools 0.10.0", + "miow 0.2.2", + "net2", + "solana-account-decoder", + "solana-bpf-loader-program", + "solana-cli-output", + "solana-logger 1.6.14", + "solana-measure", + "solana-runtime", + "solana-sdk", + "solana-transaction-status", + "solana_rbpf", + "walkdir", ] [[package]] name = "solana-bpf-rust-128bit" -version = "1.5.19" -dependencies = ["solana-bpf-rust-128bit-dep", "solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-bpf-rust-128bit-dep", + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-128bit-dep" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + 
"solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-alloc" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-call-depth" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-caller-access" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-custom-heap" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-dep-crate" -version = "1.5.19" -dependencies = ["byteorder 1.4.3", "solana-program 1.5.18"] +version = "1.6.14" +dependencies = [ + "byteorder 1.3.4", + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-deprecated-loader" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-dup-accounts" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-error-handling" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "num-derive 0.2.5", - "num-traits", - "solana-program 1.5.19", - "thiserror", + "num-derive 0.2.5", + "num-traits", + "solana-program 1.6.14", + "thiserror", ] [[package]] name = "solana-bpf-rust-external-spend" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] + +[[package]] +name = "solana-bpf-rust-finalize" +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-instruction-introspection" -version = "1.5.19" -dependencies = 
["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-invoke" -version = "1.5.19" -dependencies = ["solana-bpf-rust-invoked", "solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-bpf-rust-invoked", + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-invoke-and-error" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-invoke-and-ok" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-invoke-and-return" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-invoked" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-iter" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-many-args" -version = "1.5.19" -dependencies = ["solana-bpf-rust-many-args-dep", "solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-bpf-rust-many-args-dep", + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-many-args-dep" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-mem" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-noop" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] 
[[package]] name = "solana-bpf-rust-panic" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-param-passing" -version = "1.5.19" -dependencies = ["solana-bpf-rust-param-passing-dep", "solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-bpf-rust-param-passing-dep", + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-param-passing-dep" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] - -[[package]] -name = "solana-bpf-rust-rand" -version = "1.5.19" -dependencies = ["getrandom 0.1.16", "rand 0.7.3", "solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] -name = "solana-bpf-rust-ristretto" -version = "1.5.19" +name = "solana-bpf-rust-rand" +version = "1.6.14" dependencies = [ - "curve25519-dalek 3.1.0", - "getrandom 0.1.16", - "solana-program 1.5.18", + "getrandom 0.1.14", + "rand 0.7.3", + "solana-program 1.6.14", ] [[package]] name = "solana-bpf-rust-ro-modify" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-sanity" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] -name = "solana-bpf-rust-sha256" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +name = "solana-bpf-rust-sha" +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-spoof1" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-spoof1-system" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] -name = "solana-bpf-rust-sysval" 
-version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +name = "solana-bpf-rust-sysvar" +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-upgradeable" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-bpf-rust-upgraded" -version = "1.5.19" -dependencies = ["solana-program 1.5.19"] +version = "1.6.14" +dependencies = [ + "solana-program 1.6.14", +] [[package]] name = "solana-clap-utils" -version = "1.5.19" +version = "1.6.14" +dependencies = [ + "chrono", + "clap", + "rpassword", + "solana-remote-wallet", + "solana-sdk", + "thiserror", + "tiny-bip39", + "uriparse", + "url", +] + +[[package]] +name = "solana-cli-config" +version = "1.6.14" dependencies = [ - "chrono", - "clap", - "rpassword", - "solana-remote-wallet", - "solana-sdk", - "thiserror", - "tiny-bip39", - "url 2.2.2", + "dirs-next", + "lazy_static", + "serde", + "serde_derive", + "serde_yaml", + "url", ] [[package]] name = "solana-cli-output" -version = "1.5.19" -dependencies = [ - "Inflector", - "base64 0.13.0", - "chrono", - "console 0.11.3", - "humantime", - "indicatif", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder", - "solana-clap-utils", - "solana-client", - "solana-sdk", - "solana-stake-program", - "solana-transaction-status", - "solana-vote-program", +version = "1.6.14" +dependencies = [ + "Inflector", + "base64 0.13.0", + "chrono", + "console 0.11.3", + "humantime", + "indicatif", + "serde", + "serde_derive", + "serde_json", + "solana-account-decoder", + "solana-clap-utils", + "solana-client", + "solana-sdk", + "solana-stake-program", + "solana-transaction-status", + "solana-vote-program", + "spl-memo", ] [[package]] name = "solana-client" -version = "1.5.19" -dependencies = [ - "base64 0.13.0", - "bincode", - "bs58", - "clap", - "derivative", - "evm-rpc", - "evm-state", - "indicatif", - "jsonrpc-core", - 
"log 0.4.14", - "net2", - "rayon", - "reqwest", - "semver 0.11.0", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder", - "solana-clap-utils", - "solana-faucet", - "solana-net-utils", - "solana-sdk", - "solana-stake-program", - "solana-transaction-status", - "solana-version", - "solana-vote-program", - "thiserror", - "tungstenite", - "url 2.2.2", +version = "1.6.14" +dependencies = [ + "base64 0.13.0", + "bincode", + "bs58", + "clap", + "indicatif", + "jsonrpc-core", + "log", + "net2", + "rayon", + "reqwest", + "semver 0.11.0", + "serde", + "serde_derive", + "serde_json", + "solana-account-decoder", + "solana-clap-utils", + "solana-faucet", + "solana-net-utils", + "solana-sdk", + "solana-transaction-status", + "solana-version", + "solana-vote-program", + "thiserror", + "tokio 1.1.1", + "tungstenite", + "url", ] [[package]] name = "solana-config-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "chrono", - "log 0.4.14", - "rand_core 0.6.2", - "serde", - "serde_derive", - "solana-sdk", + "bincode", + "chrono", + "log", + "rand_core 0.6.2", + "serde", + "serde_derive", + "solana-sdk", ] [[package]] name = "solana-crate-features" -version = "1.5.19" -dependencies = [ - "backtrace", - "bytes 0.4.12", - "cc", - "curve25519-dalek 2.1.2", - "ed25519-dalek", - "either", - "lazy_static", - "libc", - "rand_chacha 0.2.2", - "regex-syntax", - "reqwest", - "serde", - "syn 0.15.44", - "syn 1.0.72", - "tokio 0.1.22", - "winapi 0.3.9", -] - -[[package]] -name = "solana-evm-loader-program" -version = "0.1.0" +version = "1.6.14" dependencies = [ - "assert_matches", - "bincode", - "crc32fast", - "ethabi", - "evm-state", - "hex", - "log 0.4.14", - "once_cell", - "primitive-types", - "ripemd160", - "serde", - "sha2 0.9.5", - "sha3 0.9.1", - "simple_logger", - "snafu", - "solana-logger 1.5.18", - "solana-sdk", + "backtrace", + "bytes 0.4.12", + "cc", + "curve25519-dalek 2.1.0", + "ed25519-dalek", + "either", + "lazy_static", + "libc", + 
"rand_chacha 0.2.2", + "regex-syntax", + "reqwest", + "serde", + "syn 0.15.44", + "syn 1.0.60", + "tokio 0.1.22", + "winapi 0.3.8", ] [[package]] name = "solana-faucet" -version = "1.5.19" -dependencies = [ - "bincode", - "byteorder 1.3.4", - "clap", - "log", - "serde", - "serde_derive", - "solana-clap-utils", - "solana-cli-config", - "solana-logger 1.5.19", - "solana-metrics", - "solana-sdk", - "solana-version", - "spl-memo 3.0.0", - "thiserror", - "tokio 0.3.6", +version = "1.6.14" +dependencies = [ + "bincode", + "byteorder 1.3.4", + "clap", + "log", + "serde", + "serde_derive", + "solana-clap-utils", + "solana-cli-config", + "solana-logger 1.6.14", + "solana-metrics", + "solana-sdk", + "solana-version", + "spl-memo", + "thiserror", + "tokio 1.1.1", ] [[package]] name = "solana-frozen-abi" -version = "1.5.18" +version = "1.6.14" dependencies = [ - "bs58", - "bv", - "generic-array 0.14.4", - "log 0.4.14", - "memmap2", - "rustc_version", - "serde", - "serde_derive", - "sha2 0.9.5", - "solana-frozen-abi-macro 1.5.18", - "solana-logger 1.5.18", - "thiserror", + "bs58", + "bv", + "generic-array 0.14.3", + "log", + "memmap2", + "rustc_version", + "serde", + "serde_derive", + "sha2 0.9.2", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "thiserror", ] [[package]] name = "solana-frozen-abi" -version = "1.6.10" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201ac63bad0f18339e1631b75d10b18d4a0a0adef5551298174fb8346cf6de" +checksum = "95c481f0f29753f5b2d382628abae98a1dd87b572ddc7cbe5fe55ca62b6f7f07" dependencies = [ - "bs58", - "bv", - "generic-array 0.14.4", - "log 0.4.14", - "memmap2", - "rustc_version", - "serde", - "serde_derive", - "sha2 0.9.5", - "solana-frozen-abi-macro 1.6.10", - "solana-logger 1.6.10", - "thiserror", + "bs58", + "bv", + "generic-array 0.14.3", + "log", + "memmap2", + "rustc_version", + "serde", + "serde_derive", + "sha2 0.9.2", + "solana-frozen-abi-macro 1.7.1", + "solana-logger 
1.7.1", + "thiserror", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.5.18" +version = "1.6.14" dependencies = [ - "lazy_static", - "proc-macro2 1.0.27", - "quote 1.0.9", - "rustc_version", - "syn 1.0.72", + "lazy_static", + "proc-macro2 1.0.24", + "quote 1.0.6", + "rustc_version", + "syn 1.0.60", ] [[package]] name = "solana-frozen-abi-macro" -version = "1.6.10" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7817d332895c39ee895508c1327bef17eb35e34fbdc5b06ca4ae26649a0392d5" +checksum = "f2068bcc47160ac9081893439b10a05e4bbe85cc0f6bccb6f1b0815423fbdd0c" dependencies = [ - "lazy_static", - "proc-macro2 1.0.27", - "quote 1.0.9", - "rustc_version", - "syn 1.0.72", + "proc-macro2 1.0.24", + "quote 1.0.6", + "rustc_version", + "syn 1.0.60", ] [[package]] name = "solana-logger" -version = "1.5.18" -dependencies = ["env_logger", "lazy_static", "log 0.4.14"] +version = "1.6.14" +dependencies = [ + "env_logger", + "lazy_static", + "log", +] [[package]] name = "solana-logger" -version = "1.6.10" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff64fd45c789b34870b58c48c27f2c137b95a446e3a359873608468d3efe77f" -dependencies = ["env_logger", "lazy_static", "log 0.4.14"] +checksum = "7ea5932e186629f47859924b3773cfd8bcb4b8796898ac85c1fa0a6a2024e5c6" +dependencies = [ + "env_logger", + "lazy_static", + "log", +] [[package]] name = "solana-measure" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "jemalloc-ctl", - "jemallocator", - "log 0.4.14", - "solana-metrics", - "solana-sdk", + "jemalloc-ctl", + "jemallocator", + "log", + "solana-metrics", + "solana-sdk", ] [[package]] name = "solana-metrics" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "env_logger", - "gethostname", - "lazy_static", - "log 0.4.14", - "reqwest", - "solana-sdk", + "env_logger", + "gethostname", + "lazy_static", + "log", + "reqwest", + "solana-sdk", ] [[package]] name = 
"solana-net-utils" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "clap", - "log 0.4.14", - "nix", - "rand 0.7.3", - "serde", - "serde_derive", - "socket2", - "solana-clap-utils", - "solana-logger 1.5.19", - "solana-version", - "tokio 0.3.7", - "url 2.2.2", + "bincode", + "clap", + "log", + "nix", + "rand 0.7.3", + "serde", + "serde_derive", + "socket2", + "solana-clap-utils", + "solana-logger 1.6.14", + "solana-version", + "tokio 1.1.1", + "url", ] [[package]] name = "solana-program" -version = "1.5.18" -dependencies = [ - "bincode", - "borsh", - "borsh-derive", - "bs58", - "bv", - "curve25519-dalek 2.1.2", - "hex", - "itertools 0.9.0", - "lazy_static", - "log 0.4.14", - "num-derive 0.3.3", - "num-traits", - "rand 0.7.3", - "rustc_version", - "rustversion", - "serde", - "serde_bytes", - "serde_derive", - "sha2 0.9.5", - "solana-frozen-abi 1.5.18", - "solana-frozen-abi-macro 1.5.18", - "solana-logger 1.5.18", - "solana-sdk-macro 1.5.18", - "thiserror", +version = "1.6.14" +dependencies = [ + "bincode", + "blake3", + "borsh", + "borsh-derive", + "bs58", + "bv", + "curve25519-dalek 2.1.0", + "hex", + "itertools 0.9.0", + "lazy_static", + "log", + "num-derive 0.3.0", + "num-traits", + "rand 0.7.3", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "sha2 0.9.2", + "sha3", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-sdk-macro 1.6.14", + "thiserror", ] [[package]] name = "solana-program" -version = "1.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632dc4e769b8dbb1884167737fd390cfa4266159ce37229e6ed64d61aba192d6" -dependencies = [ - "bincode", - "blake3", - "borsh", - "borsh-derive", - "bs58", - "bv", - "curve25519-dalek 2.1.2", - "hex", - "itertools 0.9.0", - "lazy_static", - "log 0.4.14", - "num-derive 0.3.3", - "num-traits", - "rand 0.7.3", - "rustc_version", - "rustversion", - "serde", - "serde_bytes", - 
"serde_derive", - "sha2 0.9.5", - "sha3 0.9.1", - "solana-frozen-abi 1.6.10", - "solana-frozen-abi-macro 1.6.10", - "solana-logger 1.6.10", - "solana-sdk-macro 1.6.10", - "thiserror", +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2df39c63e21c5b58e2012e7675bed5e8dd5100470ffddedcafb78f5a7e3abe" +dependencies = [ + "bincode", + "blake3", + "borsh", + "borsh-derive", + "bs58", + "bv", + "curve25519-dalek 2.1.0", + "hex", + "itertools 0.9.0", + "lazy_static", + "log", + "num-derive 0.3.0", + "num-traits", + "rand 0.7.3", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "sha2 0.9.2", + "sha3", + "solana-frozen-abi 1.7.1", + "solana-frozen-abi-macro 1.7.1", + "solana-logger 1.7.1", + "solana-sdk-macro 1.7.1", + "thiserror", ] [[package]] name = "solana-rayon-threadlimit" -version = "1.5.19" -dependencies = ["lazy_static", "num_cpus"] +version = "1.6.14" +dependencies = [ + "lazy_static", + "num_cpus", +] [[package]] name = "solana-remote-wallet" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "base32", - "console 0.11.3", - "dialoguer", - "hidapi", - "log 0.4.14", - "num-derive 0.3.3", - "num-traits", - "parking_lot 0.10.2", - "semver 0.9.0", - "solana-sdk", - "thiserror", - "url 2.2.2", + "base32", + "console 0.11.3", + "dialoguer", + "hidapi", + "log", + "num-derive 0.3.0", + "num-traits", + "parking_lot 0.10.2", + "qstring", + "semver 0.9.0", + "solana-sdk", + "thiserror", + "uriparse", ] [[package]] name = "solana-runtime" -version = "1.5.19" -dependencies = [ - "arrayref", - "bincode", - "blake3", - "bv", - "byteorder 1.4.3", - "bzip2", - "crossbeam-channel 0.4.4", - "dashmap", - "dir-diff", - "evm-rpc", - "evm-state", - "flate2", - "fnv", - "fs_extra", - "itertools 0.9.0", - "lazy_static", - "libc", - "libloading 0.6.7", - "log 0.4.14", - "memmap2", - "num-derive 0.3.3", - "num-traits", - "num_cpus", - "ouroboros", - "rand 0.7.3", - "rayon", - "regex", - 
"rustc_version", - "serde", - "serde_derive", - "solana-config-program", - "solana-evm-loader-program", - "solana-frozen-abi 1.5.18", - "solana-frozen-abi-macro 1.5.18", - "solana-logger 1.5.18", - "solana-measure", - "solana-metrics", - "solana-rayon-threadlimit", - "solana-sdk", - "solana-secp256k1-program", - "solana-stake-program", - "solana-vote-program", - "symlink", - "tar", - "tempfile", - "thiserror", - "velas-account-program", - "zstd", +version = "1.6.14" +dependencies = [ + "arrayref", + "bincode", + "blake3", + "bv", + "byteorder 1.3.4", + "bzip2", + "crossbeam-channel 0.4.4", + "dashmap", + "dir-diff", + "flate2", + "fnv", + "itertools 0.9.0", + "lazy_static", + "libc", + "libloading", + "log", + "memmap2", + "num-derive 0.3.0", + "num-traits", + "num_cpus", + "ouroboros", + "rand 0.7.3", + "rayon", + "regex", + "rustc_version", + "serde", + "serde_derive", + "solana-config-program", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-measure", + "solana-metrics", + "solana-rayon-threadlimit", + "solana-sdk", + "solana-secp256k1-program", + "solana-stake-program", + "solana-vote-program", + "symlink", + "tar", + "tempfile", + "thiserror", + "zstd", ] [[package]] name = "solana-sdk" -version = "1.5.19" -dependencies = [ - "assert_matches", - "bincode", - "bs58", - "bv", - "byteorder 1.4.3", - "chrono", - "digest 0.9.0", - "ed25519-dalek", - "evm-rpc", - "evm-state", - "generic-array 0.14.4", - "hex", - "hmac 0.10.1", - "itertools 0.9.0", - "keccak-hasher", - "lazy_static", - "libsecp256k1", - "log 0.4.14", - "memmap2", - "num-derive 0.3.3", - "num-traits", - "once_cell", - "pbkdf2 0.6.0", - "rand 0.7.3", - "rand_chacha 0.2.2", - "rlp", - "rustc_version", - "rustversion", - "serde", - "serde_bytes", - "serde_derive", - "serde_json", - "sha2 0.9.5", - "sha3 0.9.1", - "solana-crate-features", - "solana-frozen-abi 1.5.18", - "solana-frozen-abi-macro 1.5.18", - "solana-logger 1.5.18", - "solana-program 
1.5.18", - "solana-sdk-macro 1.5.18", - "tempfile", - "thiserror", - "triehash", +version = "1.6.14" +dependencies = [ + "assert_matches", + "bincode", + "bs58", + "bv", + "byteorder 1.3.4", + "chrono", + "derivation-path", + "digest 0.9.0", + "ed25519-dalek", + "ed25519-dalek-bip32", + "generic-array 0.14.3", + "hex", + "hmac 0.10.1", + "itertools 0.9.0", + "lazy_static", + "libsecp256k1", + "log", + "memmap2", + "num-derive 0.3.0", + "num-traits", + "pbkdf2 0.6.0", + "qstring", + "rand 0.7.3", + "rand_chacha 0.2.2", + "rand_core 0.6.2", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "serde_json", + "sha2 0.9.2", + "sha3", + "solana-crate-features", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-program 1.6.14", + "solana-sdk-macro 1.6.14", + "thiserror", + "uriparse", ] [[package]] name = "solana-sdk-macro" -version = "1.5.18" +version = "1.6.14" dependencies = [ - "bs58", - "proc-macro2 1.0.27", - "quote 1.0.9", - "rustversion", - "syn 1.0.72", + "bs58", + "proc-macro2 1.0.24", + "quote 1.0.6", + "rustversion", + "syn 1.0.60", ] [[package]] name = "solana-sdk-macro" -version = "1.6.10" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6762b630db60c40e3efbb461cc945e0c5dfb7b0bcf719f76551f0f97f0c005" +checksum = "6473d8fa445520564c84e8803320721404d160ffd876a125326a726541f11534" dependencies = [ - "bs58", - "proc-macro2 1.0.27", - "quote 1.0.9", - "rustversion", - "syn 1.0.72", + "bs58", + "proc-macro2 1.0.24", + "quote 1.0.6", + "rustversion", + "syn 1.0.60", ] [[package]] name = "solana-secp256k1-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "digest 0.9.0", - "libsecp256k1", - "rand 0.7.3", - "sha3 0.9.1", - "solana-logger 1.5.18", - "solana-sdk", + "bincode", + "digest 0.9.0", + "libsecp256k1", + "rand 0.7.3", + "sha3", + "solana-logger 1.6.14", + "solana-sdk", ] [[package]] name = 
"solana-stake-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "log 0.4.14", - "num-derive 0.3.3", - "num-traits", - "rustc_version", - "serde", - "serde_derive", - "solana-config-program", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-metrics", - "solana-sdk", - "solana-vote-program", - "thiserror", + "bincode", + "log", + "num-derive 0.3.0", + "num-traits", + "rustc_version", + "serde", + "serde_derive", + "solana-config-program", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-metrics", + "solana-sdk", + "solana-vote-program", + "thiserror", ] [[package]] name = "solana-transaction-status" -version = "1.5.19" -dependencies = [ - "Inflector", - "base64 0.12.3", - "bincode", - "bs58", - "lazy_static", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder", - "solana-runtime", - "solana-sdk", - "solana-stake-program", - "solana-vote-program", - "spl-associated-token-account", - "spl-memo 2.0.1", - "spl-memo 3.0.0", - "spl-token", - "thiserror", +version = "1.6.14" +dependencies = [ + "Inflector", + "base64 0.12.3", + "bincode", + "bs58", + "lazy_static", + "serde", + "serde_derive", + "serde_json", + "solana-account-decoder", + "solana-runtime", + "solana-sdk", + "solana-stake-program", + "solana-vote-program", + "spl-associated-token-account", + "spl-memo", + "spl-token", + "thiserror", ] [[package]] name = "solana-version" -version = "0.3.6" +version = "1.6.14" dependencies = [ - "log 0.4.14", - "rustc_version", - "serde", - "serde_derive", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", - "solana-sdk", + "log", + "rustc_version", + "serde", + "serde_derive", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-sdk", ] [[package]] name = "solana-vote-program" -version = "1.5.19" +version = "1.6.14" dependencies = [ - "bincode", - "log 0.4.14", - "num-derive 0.3.3", - 
"num-traits", - "rustc_version", - "serde", - "serde_derive", - "solana-frozen-abi 1.5.19", - "solana-frozen-abi-macro 1.5.19", - "solana-logger 1.5.19", - "solana-metrics", - "solana-sdk", - "thiserror", + "bincode", + "log", + "num-derive 0.3.0", + "num-traits", + "rustc_version", + "serde", + "serde_derive", + "solana-frozen-abi 1.6.14", + "solana-frozen-abi-macro 1.6.14", + "solana-logger 1.6.14", + "solana-metrics", + "solana-sdk", + "thiserror", ] [[package]] name = "solana_rbpf" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e36c51d5aa290416c5dea3c43ac467cb57c0b643184af23e6bdab7434710fb" +checksum = "debbc13545a1d972955a4fd3014e7c9d6d81da16c3626ee5f64bf3aa619548f8" dependencies = [ - "byteorder 1.4.3", - "combine", - "goblin", - "hash32", - "libc", - "log 0.4.14", - "rand 0.7.3", - "scroll", - "thiserror", - "time", + "byteorder 1.3.4", + "combine", + "goblin", + "hash32", + "libc", + "log", + "rand 0.7.3", + "scroll", + "thiserror", + "time", ] [[package]] @@ -3781,34 +3595,32 @@ name = "spl-associated-token-account" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4adc47eebe5d2b662cbaaba1843719c28a67e5ec5d0460bc3ca60900a51f74e2" -dependencies = ["solana-program 1.6.5", "spl-token"] - -[[package]] -name = "spl-memo" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2b771f6146dec14ef5fbf498f9374652c54badc3befc8c40c1d426dd45d720" -dependencies = ["solana-program 1.6.10"] +dependencies = [ + "solana-program 1.7.1", + "spl-token", +] [[package]] name = "spl-memo" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e76b60c6f58279b5469beb1705744e9778ee94d643c8e3e2ff91874c59bb3c63" -dependencies = ["solana-program 1.6.10"] +checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" +dependencies = [ + "solana-program 
1.7.1", +] [[package]] name = "spl-token" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b795e50d15dfd35aa5460b80a16414503a322be115a417a43db987c5824c6798" +checksum = "fbfa8fd791aeb4d7ad5fedb7872478de9f4e8b4fcb02dfd9e7f2f9ae3f3ddd73" dependencies = [ - "arrayref", - "num-derive 0.3.3", - "num-traits", - "num_enum", - "solana-program 1.6.10", - "thiserror", + "arrayref", + "num-derive 0.3.0", + "num-traits", + "num_enum", + "solana-program 1.7.1", + "thiserror", ] [[package]] @@ -3817,19 +3629,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -dependencies = ["bytes 0.4.12"] - [[package]] name = "strsim" version = "0.8.0" @@ -3844,9 +3643,9 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.4.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" [[package]] name = "symlink" @@ -3859,33 +3658,46 @@ name = "syn" version = "0.15.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = ["proc-macro2 0.4.30", "quote 0.6.13", "unicode-xid 0.1.0"] +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] 
[[package]] name = "syn" -version = "1.0.72" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e8cdbefb79a9a5a65e0db8b47b723ee907b7c7f8496c76a1770b5c310bab82" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "unicode-xid 0.2.2"] +checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "unicode-xid 0.2.0", +] [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", - "unicode-xid 0.2.2", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", + "unicode-xid 0.2.0", ] [[package]] name = "tar" -version = "0.4.33" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0bcfbd6a598361fda270d82469fff3d65089dc33e175c9a131f7b4cd395f228" -dependencies = ["filetime", "libc", "xattr"] +checksum = "c8a4c1d0bee3230179544336c15eefb563cf0302955d962e456542323e8c2e8a" +dependencies = [ + "filetime", + "libc", + "redox_syscall 0.1.56", + "xattr", +] [[package]] name = "tempfile" @@ -3893,92 +3705,116 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.3", - "redox_syscall 0.2.8", - "remove_dir_all", - "winapi 0.3.9", + "cfg-if 1.0.0", + "libc", + "rand 0.8.2", + "redox_syscall 0.2.4", + "remove_dir_all", + "winapi 0.3.8", ] [[package]] name = "termcolor" -version = "1.1.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = ["winapi-util"] +checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +dependencies = [ + "winapi-util", +] [[package]] name = "terminal_size" -version = "0.1.17" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = ["libc", "winapi 0.3.9"] +checksum = "4bd2d183bd3fac5f5fe38ddbeb4dc9aec4a39a9d7d59e7491d900302da01cbe1" +dependencies = [ + "libc", + "winapi 0.3.8", +] [[package]] name = "termios" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "411c5bf740737c7918b8b1fe232dca4dc9f8e754b8ad5e20966814001ed0ac6b" -dependencies = ["libc"] +dependencies = [ + "libc", +] [[package]] name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = ["unicode-width"] +dependencies = [ + "unicode-width", +] [[package]] name = "thiserror" -version = "1.0.25" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" -dependencies = ["thiserror-impl"] +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" +dependencies = [ + "thiserror-impl", +] [[package]] name = "thiserror-impl" -version = "1.0.25" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] + +[[package]] +name = "thread_local" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" -dependencies 
= ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] [[package]] name = "time" version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = ["libc", "winapi 0.3.9"] +dependencies = [ + "libc", + "winapi 0.3.8", +] [[package]] name = "tiny-bip39" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ - "failure", - "hmac 0.7.1", - "once_cell", - "pbkdf2 0.3.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.8.2", - "unicode-normalization", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.2", + "thiserror", + "unicode-normalization", + "zeroize", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = ["crunchy"] - [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" -dependencies = ["tinyvec_macros"] +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +dependencies = [ + "tinyvec_macros", +] [[package]] name = "tinyvec_macros" @@ -3992,112 +3828,107 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ - "bytes 0.4.12", - "futures", - "mio 0.6.23", - "num_cpus", - "tokio-codec", - 
"tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", + "bytes 0.4.12", + "futures 0.1.29", + "mio 0.6.22", + "num_cpus", + "tokio-codec", + "tokio-current-thread", + "tokio-executor", + "tokio-fs", + "tokio-io", + "tokio-reactor", + "tokio-sync", + "tokio-tcp", + "tokio-threadpool", + "tokio-timer", + "tokio-udp", + "tokio-uds", ] [[package]] name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "num_cpus", - "pin-project-lite 0.1.12", - "slab", -] - -[[package]] -name = "tokio" -version = "0.3.7" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46409491c9375a693ce7032101970a54f8a2010efb77e13f70788f0d84489e39" +checksum = "6714d663090b6b0acb0fa85841c6d66233d150cdb2602c8f9b8abb03370beb3f" dependencies = [ - "autocfg 1.0.1", - "bytes 0.6.0", - "futures-core", - "libc", - "memchr", - "mio 0.7.11", - "num_cpus", - "once_cell", - "parking_lot 0.11.1", - "pin-project-lite 0.2.6", - "signal-hook-registry", - "slab", - "tokio-macros", - "winapi 0.3.9", + "autocfg", + "bytes 1.0.1", + "libc", + "memchr", + "mio 0.7.7", + "num_cpus", + "once_cell", + "parking_lot 0.11.1", + "pin-project-lite 0.2.4", + "signal-hook-registry", + "tokio-macros", + "winapi 0.3.8", ] -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -dependencies = ["bytes 0.4.12", "either", "futures"] - [[package]] name = "tokio-codec" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = ["bytes 0.4.12", "futures", "tokio-io"] +dependencies = [ + "bytes 0.4.12", + "futures 0.1.29", + "tokio-io", +] [[package]] name = "tokio-current-thread" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = ["futures", "tokio-executor"] +dependencies = [ + "futures 0.1.29", + "tokio-executor", +] [[package]] name = "tokio-executor" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = ["crossbeam-utils 0.7.2", "futures"] +dependencies = [ + "crossbeam-utils 0.7.2", + "futures 0.1.29", +] [[package]] name = "tokio-fs" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = ["futures", "tokio-io", "tokio-threadpool"] +dependencies = [ + "futures 0.1.29", + "tokio-io", + "tokio-threadpool", +] [[package]] name = "tokio-io" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = ["bytes 0.4.12", "futures", "log 0.4.14"] +dependencies = [ + "bytes 0.4.12", + "futures 0.1.29", + "log", +] [[package]] name = "tokio-macros" -version = "0.3.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46dfffa59fc3c8aad216ed61bdc2c263d2b9d87a9c8ac9de0c11a813e51b6db7" -dependencies = ["proc-macro2 1.0.27", "quote 1.0.9", "syn 1.0.72"] +checksum = "42517d2975ca3114b22a16192634e8241dc5cc1f130be194645970cc1c371494" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", +] [[package]] name = "tokio-reactor" @@ -4105,32 +3936,39 @@ version = "0.1.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ - "crossbeam-utils 0.7.2", - "futures", - "lazy_static", - "log 0.4.14", - "mio 0.6.23", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - "tokio-io", - "tokio-sync", + "crossbeam-utils 0.7.2", + "futures 0.1.29", + "lazy_static", + "log", + "mio 0.6.22", + "num_cpus", + "parking_lot 0.9.0", + "slab", + "tokio-executor", + "tokio-io", + "tokio-sync", ] [[package]] name = "tokio-rustls" -version = "0.14.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" -dependencies = ["futures-core", "rustls", "tokio 0.2.25", "webpki"] +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls", + "tokio 1.1.1", + "webpki", +] [[package]] name = "tokio-sync" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = ["fnv", "futures"] +dependencies = [ + "fnv", + "futures 0.1.29", +] [[package]] name = "tokio-tcp" @@ -4138,12 +3976,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ - "bytes 0.4.12", - "futures", - "iovec", - "mio 0.6.23", - "tokio-io", - "tokio-reactor", + "bytes 0.4.12", + "futures 0.1.29", + "iovec", + "mio 0.6.22", + "tokio-io", + "tokio-reactor", ] [[package]] @@ -4152,15 +3990,15 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque 0.7.3", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures", - "lazy_static", - "log 0.4.14", - 
"num_cpus", - "slab", - "tokio-executor", + "crossbeam-deque 0.7.3", + "crossbeam-queue", + "crossbeam-utils 0.7.2", + "futures 0.1.29", + "lazy_static", + "log", + "num_cpus", + "slab", + "tokio-executor", ] [[package]] @@ -4168,14 +4006,12 @@ name = "tokio-timer" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = ["crossbeam-utils 0.7.2", "futures", "slab", "tokio-executor"] - -[[package]] -name = "tokio-tls" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" -dependencies = ["futures", "native-tls", "tokio-io"] +dependencies = [ + "crossbeam-utils 0.7.2", + "futures 0.1.29", + "slab", + "tokio-executor", +] [[package]] name = "tokio-udp" @@ -4183,45 +4019,45 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ - "bytes 0.4.12", - "futures", - "log 0.4.14", - "mio 0.6.23", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "bytes 0.4.12", + "futures 0.1.29", + "log", + "mio 0.6.22", + "tokio-codec", + "tokio-io", + "tokio-reactor", ] [[package]] name = "tokio-uds" -version = "0.2.7" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" +checksum = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" dependencies = [ - "bytes 0.4.12", - "futures", - "iovec", - "libc", - "log 0.4.14", - "mio 0.6.23", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", + "bytes 0.4.12", + "futures 0.1.29", + "iovec", + "libc", + "log", + "mio 0.6.22", + "mio-uds", + "tokio-codec", + "tokio-io", + "tokio-reactor", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "ec31e5cc6b46e653cf57762f36f71d5e6386391d88a72fd6db4508f8f676fb29" dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-sink", - "log 0.4.14", - "pin-project-lite 0.1.12", - "tokio 0.2.25", + "bytes 1.0.1", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.2.4", + "tokio 1.1.1", ] [[package]] @@ -4229,64 +4065,41 @@ name = "toml" version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = ["serde"] +dependencies = [ + "serde", +] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" dependencies = [ - "cfg-if 1.0.0", - "log 0.4.14", - "pin-project-lite 0.2.6", - "tracing-core", + "cfg-if 0.1.10", + "pin-project-lite 0.1.5", + "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" -dependencies = ["lazy_static"] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = ["pin-project", "tracing"] - -[[package]] -name = 
"traitobject" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" - -[[package]] -name = "triedb" -version = "0.5.0" -source = "git+https://github.com/velas/triedb?branch=chore/bump-rocksdb#eba324ad94551de29145023d58d31a4a86def84e" -dependencies = ["primitive-types", "rlp", "rocksdb", "sha3 0.9.1", "thiserror"] - -[[package]] -name = "triehash" -version = "0.8.4" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" -dependencies = ["hash-db", "rlp"] +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "tungstenite" @@ -4294,31 +4107,25 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfea31758bf674f990918962e8e5f07071a3161bd7c4138ed23e416e1ac4264e" dependencies = [ - "base64 0.11.0", - "byteorder 1.4.3", - "bytes 0.5.6", - "http 0.2.4", - "httparse", - "input_buffer", - "log 0.4.14", - "native-tls", - "rand 0.7.3", - "sha-1", - "url 2.2.2", - "utf-8", + "base64 0.11.0", + "byteorder 1.3.4", + "bytes 0.5.4", + "http", + "httparse", + "input_buffer", + "log", + "native-tls", + "rand 0.7.3", + "sha-1", + "url", + "utf-8", ] -[[package]] -name = "typeable" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" - [[package]] name = "typenum" -version = "1.13.0" +version = "1.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" [[package]] name = "ucd-trie" @@ -4326,40 +4133,23 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" -[[package]] -name = "uint" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" -dependencies = ["byteorder 1.4.3", "crunchy", "hex", "static_assertions"] - -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = ["version_check 0.1.5"] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = ["version_check 0.9.3"] - [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = ["matches"] +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" -dependencies = ["tinyvec"] +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +dependencies = [ + "tinyvec", +] [[package]] name = "unicode-width" @@ -4375,16 +4165,18 @@ checksum = 
"fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "unreachable" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -dependencies = ["void"] +dependencies = [ + "void", +] [[package]] name = "untrusted" @@ -4397,38 +4189,34 @@ name = "uriparse" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e515b1ada404168e145ac55afba3c42f04cf972201a8552d42e2abb17c1b7221" -dependencies = ["fnv", "lazy_static"] - -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = ["idna 0.1.5", "matches", "percent-encoding 1.0.1"] +dependencies = [ + "fnv", + "lazy_static", +] [[package]] name = "url" -version = "2.2.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ - "form_urlencoded", - "idna 0.2.3", - "matches", - "percent-encoding 2.1.0", + "form_urlencoded", + "idna", + "matches", + "percent-encoding", ] [[package]] name = "utf-8" -version = "0.7.6" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" [[package]] name = "vcpkg" -version = "0.2.13" +version = "0.2.11" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025ce40a007e1907e58d5bc1a594def78e5573bb0b1160bc389634e8f12e4faa" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" [[package]] name = "vec_map" @@ -4436,28 +4224,11 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" -[[package]] -name = "velas-account-program" -version = "0.0.1" -dependencies = [ - "borsh", - "borsh-derive", - "rustc_version", - "serde", - "solana-sdk", -] - -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" - [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" [[package]] name = "void" @@ -4467,24 +4238,24 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = ["same-file", "winapi 0.3.9", "winapi-util"] - -[[package]] -name = "want" -version = "0.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -dependencies = ["futures", "log 0.4.14", "try-lock"] +checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +dependencies = [ + "same-file", + "winapi 0.3.8", + "winapi-util", +] [[package]] name = "want" version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = ["log 0.4.14", "try-lock"] +dependencies = [ + "log", + "try-lock", +] [[package]] name = "wasi" @@ -4494,125 +4265,105 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" -dependencies = ["cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro"] +checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +dependencies = [ + "cfg-if 1.0.0", + "serde", + "serde_json", + "wasm-bindgen-macro", +] [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" dependencies = [ - "bumpalo", - "lazy_static", - "log 0.4.14", - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", - "wasm-bindgen-shared", + "bumpalo", + "lazy_static", + "log", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" -dependencies = ["cfg-if 1.0.0", "js-sys", 
"wasm-bindgen", "web-sys"] +checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" -dependencies = ["quote 1.0.9", "wasm-bindgen-macro-support"] +checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +dependencies = [ + "quote 1.0.6", + "wasm-bindgen-macro-support", +] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", - "wasm-bindgen-backend", - "wasm-bindgen-shared", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", + "wasm-bindgen-backend", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" -dependencies = ["js-sys", "wasm-bindgen"] +checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" +dependencies = [ + "js-sys", + "wasm-bindgen", +] [[package]] name = "webpki" version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" -dependencies = ["ring", "untrusted"] - -[[package]] -name = "webpki-roots" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" -dependencies = ["webpki"] - -[[package]] -name = "websocket" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413b37840b9e27b340ce91b319ede10731de8c72f5bc4cb0206ec1ca4ce581d0" dependencies = [ - "bytes 0.4.12", - "futures", - "hyper 0.10.16", - "native-tls", - "rand 0.6.1", - "tokio-codec", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-tls", - "unicase 1.4.2", - "url 1.7.2", - "websocket-base", + "ring", + "untrusted", ] [[package]] -name = "websocket-base" -version = "0.24.0" +name = "webpki-roots" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3810f0d00c4dccb54c30a4eee815e703232819dec7b007db115791c42aa374" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ - "base64 0.10.1", - "bitflags", - "byteorder 1.4.3", - "bytes 0.4.12", - "futures", - "native-tls", - "rand 0.6.1", - "sha1", - "tokio-codec", - "tokio-io", - "tokio-tcp", - "tokio-tls", + "webpki", ] [[package]] @@ -4623,10 +4374,13 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.9" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = ["winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu"] +checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] [[package]] name = "winapi-build" @@ -4645,7 +4399,9 @@ name = "winapi-util" version = 
"0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = ["winapi 0.3.9"] +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "winapi-x86_64-pc-windows-gnu" @@ -4658,65 +4414,86 @@ name = "winreg" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = ["winapi 0.3.9"] +dependencies = [ + "winapi 0.3.8", +] [[package]] name = "ws2_32-sys" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = ["winapi 0.2.8", "winapi-build"] +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] [[package]] name = "xattr" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" -dependencies = ["libc"] +dependencies = [ + "libc", +] [[package]] name = "yaml-rust" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = ["linked-hash-map"] +dependencies = [ + "linked-hash-map", +] [[package]] name = "zeroize" -version = "1.3.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" -dependencies = ["zeroize_derive"] +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +dependencies = [ + "zeroize_derive", +] [[package]] name = "zeroize_derive" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = 
"de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.72", - "synstructure", + "proc-macro2 1.0.24", + "quote 1.0.6", + "syn 1.0.60", + "synstructure", ] [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.5.3+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" -dependencies = ["zstd-safe"] +checksum = "01b32eaf771efa709e8308605bbf9319bf485dc1503179ec0469b611937c0cd8" +dependencies = [ + "zstd-safe", +] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "2.0.5+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" -dependencies = ["libc", "zstd-sys"] +checksum = "1cfb642e0d27f64729a639c52db457e0ae906e7bc6f5fe8f5c453230400f1055" +dependencies = [ + "libc", + "zstd-sys", +] [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.4.17+zstd.1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" -dependencies = ["cc", "glob", "itertools 0.9.0", "libc"] +checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" +dependencies = [ + "cc", + "glob", + "itertools 0.9.0", + "libc", +] diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index c369cdddb2..20f92c132c 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-bpf-programs" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" documentation = "https://docs.rs/solana" homepage = "https://solana.com/" readme = "README.md" @@ -25,14 +25,16 @@ elf = "0.0.10" itertools = "0.10.0" miow = "0.2.2" net2 = "0.2.37" -solana-bpf-loader-program = { path = "../bpf_loader", version = 
"=1.5.19" } -solana-cli-output = { path = "../../cli-output", version = "=1.5.19" } -solana-logger = { path = "../../logger", version = "=1.5.19" } -solana-measure = { path = "../../measure", version = "=1.5.19" } -solana_rbpf = "=0.2.7" -solana-runtime = { path = "../../runtime", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.5.19" } +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.6.14" } +solana-cli-output = { path = "../../cli-output", version = "=1.6.14" } +solana-logger = { path = "../../logger", version = "=1.6.14" } +solana-measure = { path = "../../measure", version = "=1.6.14" } +solana_rbpf = "=0.2.9" +solana-runtime = { path = "../../runtime", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.6.14" } +solana-account-decoder = { path = "../../account-decoder", version = "=1.6.14" } + [[bench]] name = "bpf_loader" @@ -50,6 +52,7 @@ members = [ "rust/dup_accounts", "rust/error_handling", "rust/external_spend", + "rust/finalize", "rust/instruction_introspection", "rust/invoke", "rust/invoke_and_error", @@ -65,13 +68,12 @@ members = [ "rust/param_passing", "rust/param_passing_dep", "rust/rand", - "rust/ristretto", "rust/ro_modify", "rust/sanity", - "rust/sha256", + "rust/sha", "rust/spoof1", "rust/spoof1_system", - "rust/sysval", + "rust/sysvar", "rust/upgradeable", "rust/upgraded", ] diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs index 23e784c9d1..a52d8f03e5 100644 --- a/programs/bpf/benches/bpf_loader.rs +++ b/programs/bpf/benches/bpf_loader.rs @@ -19,7 +19,7 @@ use solana_runtime::{ loader_utils::load_program, }; use solana_sdk::{ - account::Account, + account::AccountSharedData, bpf_loader, client::SyncClient, entrypoint::SUCCESS, @@ -75,9 +75,12 @@ fn 
bench_program_create_executable(bencher: &mut Bencher) { let elf = load_elf("bench_alu").unwrap(); bencher.iter(|| { - let _ = - Executable::::from_elf(&elf, None, Config::default()) - .unwrap(); + let _ = >::from_elf( + &elf, + None, + Config::default(), + ) + .unwrap(); }); } @@ -95,7 +98,7 @@ fn bench_program_alu(bencher: &mut Bencher) { let elf = load_elf("bench_alu").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + >::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); executable.jit_compile().unwrap(); @@ -175,7 +178,8 @@ fn bench_program_execute_noop(bencher: &mut Bencher) { let mint_pubkey = mint_keypair.pubkey(); let account_metas = vec![AccountMeta::new(mint_pubkey, true)]; - let instruction = Instruction::new(invoke_program_id, &[u8::MAX, 0, 0, 0], account_metas); + let instruction = + Instruction::new_with_bincode(invoke_program_id, &[u8::MAX, 0, 0, 0], account_metas); let message = Message::new(&[instruction], Some(&mint_pubkey)); bank_client @@ -197,7 +201,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let mut invoke_context = MockInvokeContext::default(); invoke_context.compute_meter.remaining = BUDGET; - let accounts = [RefCell::new(Account::new( + let accounts = [RefCell::new(AccountSharedData::new( 1, 10000001, &solana_sdk::pubkey::new_rand(), @@ -221,7 +225,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let elf = load_elf("tuner").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + >::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); let compute_meter = invoke_context.get_compute_meter(); @@ -229,7 +233,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let mut vm = create_vm( &loader_id, executable.as_ref(), - &mut serialized, + serialized.as_slice_mut(), &[], 
&mut invoke_context, ) diff --git a/programs/bpf/build.rs b/programs/bpf/build.rs index f7fa029c80..8304d3d2c8 100644 --- a/programs/bpf/build.rs +++ b/programs/bpf/build.rs @@ -68,6 +68,7 @@ fn main() { "dup_accounts", "error_handling", "external_spend", + "finalize", "instruction_introspection", "invoke", "invoke_and_error", @@ -81,13 +82,12 @@ fn main() { "panic", "param_passing", "rand", - "ristretto", "ro_modify", "sanity", - "sha256", + "sha", "spoof1", "spoof1_system", - "sysval", + "sysvar", "upgradeable", "upgraded", ]; diff --git a/programs/bpf/c/src/alloc/alloc.c b/programs/bpf/c/src/alloc/alloc.c index 190e15ea05..f7b3b93bbb 100644 --- a/programs/bpf/c/src/alloc/alloc.c +++ b/programs/bpf/c/src/alloc/alloc.c @@ -7,7 +7,7 @@ extern uint64_t entrypoint(const uint8_t *input) { { // Confirm large allocation fails - void *ptr = sol_calloc(1, UINT64_MAX); // TODO use max + void *ptr = sol_calloc(1, UINT64_MAX); if (ptr != NULL) { sol_log("Error: Alloc of very larger buffer should fail"); sol_panic(); @@ -16,7 +16,7 @@ extern uint64_t entrypoint(const uint8_t *input) { { // Confirm large allocation fails - void *ptr = sol_calloc(18446744073709551615U, 1); // TODO use max + void *ptr = sol_calloc(UINT64_MAX, 1); if (ptr != NULL) { sol_log("Error: Alloc of very larger buffer should fail"); sol_panic(); diff --git a/programs/bpf/c/src/dup_accounts/dup_accounts.c b/programs/bpf/c/src/dup_accounts/dup_accounts.c index b3914592ab..4f33ff8bec 100644 --- a/programs/bpf/c/src/dup_accounts/dup_accounts.c +++ b/programs/bpf/c/src/dup_accounts/dup_accounts.c @@ -1,5 +1,5 @@ /** - * @brief Example C-based BPF program that exercises duplicate keyed ka + * @brief Example C-based BPF program that exercises duplicate keyed accounts * passed to it */ #include @@ -9,46 +9,87 @@ */ extern uint64_t entrypoint(const uint8_t *input) { - SolAccountInfo ka[4]; - SolParameters params = (SolParameters) { .ka = ka }; + SolAccountInfo accounts[5]; + SolParameters params = 
(SolParameters){.ka = accounts}; - if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(ka))) { + if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(accounts))) { return ERROR_INVALID_ARGUMENT; } switch (params.data[0]) { - case(1): - sol_log("modify first account data"); - ka[2].data[0] = 1; - break; - case(2): - sol_log("modify first account data"); - ka[3].data[0] = 2; - break; - case(3): - sol_log("modify both account data"); - ka[2].data[0] += 1; - ka[3].data[0] += 2; - break; - case(4): - sol_log("modify first account lamports"); - *ka[1].lamports -= 1; - *ka[2].lamports += 1; - break; - case(5): - sol_log("modify first account lamports"); - *ka[1].lamports -= 2; - *ka[3].lamports += 2; - break; - case(6): - sol_log("modify both account lamports"); - *ka[1].lamports -= 3; - *ka[2].lamports += 1; - *ka[3].lamports += 2; - break; - default: - sol_log("Unrecognized command"); - return ERROR_INVALID_INSTRUCTION_DATA; + case (1): + sol_log("modify first account data"); + accounts[2].data[0] = 1; + break; + case (2): + sol_log("modify first account data"); + accounts[3].data[0] = 2; + break; + case (3): + sol_log("modify both account data"); + accounts[2].data[0] += 1; + accounts[3].data[0] += 2; + break; + case (4): + sol_log("modify first account lamports"); + *accounts[1].lamports -= 1; + *accounts[2].lamports += 1; + break; + case (5): + sol_log("modify first account lamports"); + *accounts[1].lamports -= 2; + *accounts[3].lamports += 2; + break; + case (6): + sol_log("modify both account lamports"); + *accounts[1].lamports -= 3; + *accounts[2].lamports += 1; + *accounts[3].lamports += 2; + break; + case (7): + sol_log("check account (0,1,2,3) privs"); + sol_assert(accounts[0].is_signer); + sol_assert(!accounts[1].is_signer); + sol_assert(accounts[2].is_signer); + sol_assert(accounts[3].is_signer); + + sol_assert(accounts[0].is_writable); + sol_assert(accounts[1].is_writable); + sol_assert(accounts[2].is_writable); + sol_assert(accounts[3].is_writable); + + if 
(params.ka_num > 4) { + { + SolAccountMeta arguments[] = {{accounts[0].key, true, true}, + {accounts[1].key, true, false}, + {accounts[2].key, true, false}, + {accounts[3].key, false, true}}; + uint8_t data[] = {7}; + const SolInstruction instruction = { + (SolPubkey *)params.program_id, arguments, + SOL_ARRAY_SIZE(arguments), data, SOL_ARRAY_SIZE(data)}; + sol_assert(SUCCESS == + sol_invoke(&instruction, accounts, params.ka_num)); + } + { + SolAccountMeta arguments[] = {{accounts[0].key, true, true}, + {accounts[1].key, true, false}, + {accounts[2].key, true, false}, + {accounts[3].key, true, false}}; + uint8_t data[] = {3}; + const SolInstruction instruction = { + (SolPubkey *)params.program_id, arguments, + SOL_ARRAY_SIZE(arguments), data, SOL_ARRAY_SIZE(data)}; + sol_assert(SUCCESS == + sol_invoke(&instruction, accounts, params.ka_num)); + } + sol_assert(accounts[2].data[0] == 3); + sol_assert(accounts[3].data[0] == 3); + } + break; + default: + sol_log("Unrecognized command"); + return ERROR_INVALID_INSTRUCTION_DATA; } return SUCCESS; } diff --git a/programs/bpf/c/src/invoke/invoke.c b/programs/bpf/c/src/invoke/invoke.c index 8b476bce56..d0bded8e6f 100644 --- a/programs/bpf/c/src/invoke/invoke.c +++ b/programs/bpf/c/src/invoke/invoke.c @@ -17,7 +17,8 @@ static const uint8_t TEST_INSTRUCTION_META_TOO_LARGE = 10; static const uint8_t TEST_RETURN_ERROR = 11; static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER = 12; static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE = 13; -static const uint8_t TEST_WRITE_DEESCALATION = 14; +static const uint8_t TEST_WRITABLE_DEESCALATION_WRITABLE = 14; +static const uint8_t TEST_NESTED_INVOKE_TOO_DEEP = 15; static const int MINT_INDEX = 0; static const int ARGUMENT_INDEX = 1; @@ -31,6 +32,35 @@ static const int DERIVED_KEY3_INDEX = 8; static const int SYSTEM_PROGRAM_INDEX = 9; static const int FROM_INDEX = 10; +uint64_t do_nested_invokes(uint64_t num_nested_invokes, + SolAccountInfo *accounts, 
uint64_t num_accounts) { + sol_assert(accounts[ARGUMENT_INDEX].is_signer); + + *accounts[ARGUMENT_INDEX].lamports -= 5; + *accounts[INVOKED_ARGUMENT_INDEX].lamports += 5; + + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, + {accounts[ARGUMENT_INDEX].key, true, true}, + {accounts[INVOKED_PROGRAM_INDEX].key, false, false}}; + uint8_t data[] = {NESTED_INVOKE, num_nested_invokes}; + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + + sol_log("First invoke"); + sol_assert(SUCCESS == sol_invoke(&instruction, accounts, num_accounts)); + sol_log("2nd invoke from first program"); + sol_assert(SUCCESS == sol_invoke(&instruction, accounts, num_accounts)); + + sol_assert(*accounts[ARGUMENT_INDEX].lamports == + 42 - 5 + (2 * num_nested_invokes)); + sol_assert(*accounts[INVOKED_ARGUMENT_INDEX].lamports == + 10 + 5 - (2 * num_nested_invokes)); + + return SUCCESS; +} + extern uint64_t entrypoint(const uint8_t *input) { sol_log("Invoke C program"); @@ -203,32 +233,9 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); } - sol_log("Test invoke"); + sol_log("Test nested invoke"); { - sol_assert(accounts[ARGUMENT_INDEX].is_signer); - - *accounts[ARGUMENT_INDEX].lamports -= 5; - *accounts[INVOKED_ARGUMENT_INDEX].lamports += 5; - - SolAccountMeta arguments[] = { - {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, - {accounts[ARGUMENT_INDEX].key, true, true}, - {accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false}}; - uint8_t data[] = {NESTED_INVOKE}; - const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, - arguments, SOL_ARRAY_SIZE(arguments), - data, SOL_ARRAY_SIZE(data)}; - - sol_log("First invoke"); - sol_assert(SUCCESS == - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); - sol_log("2nd invoke from first program"); - sol_assert(SUCCESS == - 
sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); - - sol_assert(*accounts[ARGUMENT_INDEX].lamports == 42 - 5 + 1 + 1 + 1 + 1); - sol_assert(*accounts[INVOKED_ARGUMENT_INDEX].lamports == - 10 + 5 - 1 - 1 - 1 - 1); + sol_assert(SUCCESS == do_nested_invokes(4, accounts, params.ka_num)); } sol_log("Test privilege deescalation"); @@ -474,7 +481,6 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); break; } - case TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: { sol_log("Test privilege deescalation escalation signer"); sol_assert(true == accounts[INVOKED_ARGUMENT_INDEX].is_signer); @@ -505,19 +511,30 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); break; } + case TEST_WRITABLE_DEESCALATION_WRITABLE: { + sol_log("Test writable deescalation"); + uint8_t buffer[10]; + for (int i = 0; i < 10; i++) { + buffer[i] = accounts[INVOKED_ARGUMENT_INDEX].data[i]; + } + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; + uint8_t data[] = {WRITE_ACCOUNT, 10}; + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); - case TEST_WRITE_DEESCALATION: { - sol_log("Test writable deescalation"); - - SolAccountMeta arguments[] = { - {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; - uint8_t data[] = {WRITE_ACCOUNT, 10}; - const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, - arguments, SOL_ARRAY_SIZE(arguments), - data, SOL_ARRAY_SIZE(data)}; - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); + for (int i = 0; i < 10; i++) { + sol_assert(buffer[i] == accounts[INVOKED_ARGUMENT_INDEX].data[i]); + } + break; + } + case TEST_NESTED_INVOKE_TOO_DEEP: { + do_nested_invokes(5, accounts, params.ka_num); break; } + default: 
sol_panic(); } diff --git a/programs/bpf/c/src/invoked/invoked.c b/programs/bpf/c/src/invoked/invoked.c index f554ca9a2a..273deb9d06 100644 --- a/programs/bpf/c/src/invoked/invoked.c +++ b/programs/bpf/c/src/invoked/invoked.c @@ -160,7 +160,7 @@ extern uint64_t entrypoint(const uint8_t *input) { } case VERIFY_PRIVILEGE_ESCALATION: { - sol_log("Should never get here!"); + sol_log("Verify privilege escalation"); break; } @@ -228,16 +228,17 @@ extern uint64_t entrypoint(const uint8_t *input) { *accounts[INVOKED_ARGUMENT_INDEX].lamports -= 1; *accounts[ARGUMENT_INDEX].lamports += 1; - if (params.ka_num == 3) { + uint8_t remaining_invokes = params.data[1]; + if (remaining_invokes > 1) { + sol_log("Invoke again"); SolAccountMeta arguments[] = { {accounts[INVOKED_ARGUMENT_INDEX].key, true, true}, - {accounts[ARGUMENT_INDEX].key, true, true}}; - uint8_t data[] = {NESTED_INVOKE}; + {accounts[ARGUMENT_INDEX].key, true, true}, + {accounts[INVOKED_PROGRAM_INDEX].key, false, false}}; + uint8_t data[] = {NESTED_INVOKE, remaining_invokes - 1}; const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, arguments, SOL_ARRAY_SIZE(arguments), data, SOL_ARRAY_SIZE(data)}; - - sol_log("Invoke again"); sol_assert(SUCCESS == sol_invoke(&instruction, accounts, params.ka_num)); } else { sol_log("Last invoked"); diff --git a/programs/bpf/c/src/ser/ser.c b/programs/bpf/c/src/ser/ser.c new file mode 100644 index 0000000000..4d09f9409d --- /dev/null +++ b/programs/bpf/c/src/ser/ser.c @@ -0,0 +1,36 @@ +/** + * @brief Example C-based BPF sanity rogram that prints out the parameters + * passed to it + */ +#include + +extern uint64_t entrypoint(const uint8_t *input) { + SolAccountInfo ka[2]; + SolParameters params = (SolParameters){.ka = ka}; + + sol_log(__FILE__); + + if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(ka))) { + return ERROR_INVALID_ARGUMENT; + } + + char ka_data[] = {1, 2, 3}; + SolPubkey ka_owner; + sol_memset(ka_owner.x, 0, SIZE_PUBKEY); // set to system program + + 
sol_assert(params.ka_num == 2); + for (int i = 0; i < 2; i++) { + sol_assert(*params.ka[i].lamports == 42); + sol_assert(!sol_memcmp(params.ka[i].data, ka_data, 4)); + sol_assert(SolPubkey_same(params.ka[i].owner, &ka_owner)); + sol_assert(params.ka[i].is_signer == false); + sol_assert(params.ka[i].is_writable == false); + sol_assert(params.ka[i].executable == false); + } + + char data[] = {4, 5, 6, 7}; + sol_assert(params.data_len = 4); + sol_assert(!sol_memcmp(params.data, data, 4)); + + return SUCCESS; +} diff --git a/programs/bpf/c/src/sha/sha.c b/programs/bpf/c/src/sha/sha.c new file mode 100644 index 0000000000..b61a14de8f --- /dev/null +++ b/programs/bpf/c/src/sha/sha.c @@ -0,0 +1,47 @@ +/** + * @brief SHA256 Syscall test + */ +#include + +extern uint64_t entrypoint(const uint8_t *input) { + + // SHA256 + { + uint8_t result[SHA256_RESULT_LENGTH]; + uint8_t expected[] = {0x9f, 0xa2, 0x7e, 0x8f, 0x7b, 0xc1, 0xec, 0xe8, + 0xae, 0x7b, 0x9a, 0x91, 0x46, 0x53, 0x20, 0xf, + 0x1c, 0x22, 0x8e, 0x56, 0x10, 0x30, 0x59, 0xfd, + 0x35, 0x8d, 0x57, 0x54, 0x96, 0x47, 0x2c, 0xc9}; + + uint8_t bytes1[] = {'G', 'a', 'g', 'g', 'a', 'b', 'l', 'a', + 'g', 'h', 'b', 'l', 'a', 'g', 'h', '!'}; + uint8_t bytes2[] = {'f', 'l', 'u', 'r', 'b', 'o', 's'}; + const SolBytes bytes[] = {{bytes1, SOL_ARRAY_SIZE(bytes1)}, + {bytes2, SOL_ARRAY_SIZE(bytes2)}}; + + sol_sha256(bytes, SOL_ARRAY_SIZE(bytes), result); + + sol_assert(0 == sol_memcmp(result, expected, SHA256_RESULT_LENGTH)); + } + + // Keccak + { + uint8_t result[KECCAK_RESULT_LENGTH]; + uint8_t expected[] = {0xd1, 0x9a, 0x9d, 0xe2, 0x89, 0x7f, 0x7c, 0x9e, + 0x5, 0x32, 0x32, 0x22, 0xe8, 0xc6, 0xb4, 0x88, + 0x6b, 0x5b, 0xbb, 0xec, 0xd4, 0x42, 0xfd, 0x10, + 0x7d, 0xd5, 0x9a, 0x6f, 0x21, 0xd3, 0xb8, 0xa7}; + + uint8_t bytes1[] = {'G', 'a', 'g', 'g', 'a', 'b', 'l', 'a', + 'g', 'h', 'b', 'l', 'a', 'g', 'h', '!'}; + uint8_t bytes2[] = {'f', 'l', 'u', 'r', 'b', 'o', 's'}; + const SolBytes bytes[] = {{bytes1, SOL_ARRAY_SIZE(bytes1)}, + 
{bytes2, SOL_ARRAY_SIZE(bytes2)}}; + + sol_keccak256(bytes, SOL_ARRAY_SIZE(bytes), result); + + sol_assert(0 == sol_memcmp(result, expected, KECCAK_RESULT_LENGTH)); + } + + return SUCCESS; +} diff --git a/programs/bpf/c/src/sha256/sha256.c b/programs/bpf/c/src/sha256/sha256.c deleted file mode 100644 index 55decd0bd0..0000000000 --- a/programs/bpf/c/src/sha256/sha256.c +++ /dev/null @@ -1,25 +0,0 @@ -/** - * @brief SHA256 Syscall test - */ -#include - -extern uint64_t entrypoint(const uint8_t *input) { - - uint8_t result[SHA256_RESULT_LENGTH]; - uint8_t expected[] = {0x9f, 0xa2, 0x7e, 0x8f, 0x7b, 0xc1, 0xec, 0xe8, - 0xae, 0x7b, 0x9a, 0x91, 0x46, 0x53, 0x20, 0xf, - 0x1c, 0x22, 0x8e, 0x56, 0x10, 0x30, 0x59, 0xfd, - 0x35, 0x8d, 0x57, 0x54, 0x96, 0x47, 0x2c, 0xc9}; - - uint8_t bytes1[] = {'G', 'a', 'g', 'g', 'a', 'b', 'l', 'a', - 'g', 'h', 'b', 'l', 'a', 'g', 'h', '!'}; - uint8_t bytes2[] = {'f', 'l', 'u', 'r', 'b', 'o', 's'}; - const SolBytes bytes[] = {{bytes1, SOL_ARRAY_SIZE(bytes1)}, - {bytes2, SOL_ARRAY_SIZE(bytes2)}}; - - sol_sha256(bytes, SOL_ARRAY_SIZE(bytes), result); - - sol_assert(0 == sol_memcmp(result, expected, SHA256_RESULT_LENGTH)); - - return SUCCESS; -} diff --git a/programs/bpf/rust/128bit/Cargo.toml b/programs/bpf/rust/128bit/Cargo.toml index e1929bd410..c4a9cff5d6 100644 --- a/programs/bpf/rust/128bit/Cargo.toml +++ b/programs/bpf/rust/128bit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } -solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } +solana-bpf-rust-128bit-dep 
= { path = "../128bit_dep", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/128bit/Xargo.toml b/programs/bpf/rust/128bit/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/128bit/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/128bit_dep/Cargo.toml b/programs/bpf/rust/128bit_dep/Cargo.toml index dc126562aa..07f4f9166c 100644 --- a/programs/bpf/rust/128bit_dep/Cargo.toml +++ b/programs/bpf/rust/128bit_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit-dep" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/128bit_dep/Xargo.toml b/programs/bpf/rust/128bit_dep/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/128bit_dep/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/alloc/Cargo.toml b/programs/bpf/rust/alloc/Cargo.toml index 4afefebeb4..ad2cf1277f 100644 --- a/programs/bpf/rust/alloc/Cargo.toml +++ b/programs/bpf/rust/alloc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-alloc" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = 
"https://docs.rs/solana-bpf-rust-alloc" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/alloc/Xargo.toml b/programs/bpf/rust/alloc/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/alloc/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/call_depth/Cargo.toml b/programs/bpf/rust/call_depth/Cargo.toml index 7397be1b05..fa48079dce 100644 --- a/programs/bpf/rust/call_depth/Cargo.toml +++ b/programs/bpf/rust/call_depth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-call-depth" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-call-depth" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/call_depth/Xargo.toml b/programs/bpf/rust/call_depth/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/call_depth/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/caller_access/Cargo.toml b/programs/bpf/rust/caller_access/Cargo.toml index b609410459..18b5c67527 100644 --- a/programs/bpf/rust/caller_access/Cargo.toml +++ b/programs/bpf/rust/caller_access/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-caller-access" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF 
test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-caller-access" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/caller_access/Xargo.toml b/programs/bpf/rust/caller_access/Xargo.toml deleted file mode 100644 index 475fb71ed1..0000000000 --- a/programs/bpf/rust/caller_access/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] diff --git a/programs/bpf/rust/caller_access/src/lib.rs b/programs/bpf/rust/caller_access/src/lib.rs index 36d436c2a8..6db20e433a 100644 --- a/programs/bpf/rust/caller_access/src/lib.rs +++ b/programs/bpf/rust/caller_access/src/lib.rs @@ -17,7 +17,7 @@ fn process_instruction( ) -> ProgramResult { if instruction_data.len() == 32 { let key = Pubkey::new_from_array(instruction_data.try_into().unwrap()); - let ix = Instruction::new(key, &[2], vec![]); + let ix = Instruction::new_with_bincode(key, &[2], vec![]); let mut lamports = accounts[0].lamports(); let owner = &accounts[0].owner; let mut data = accounts[0].try_borrow_mut_data()?; @@ -36,7 +36,7 @@ fn process_instruction( } else { match instruction_data[0] { 1 => { - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( *program_id, &accounts[1].key.to_bytes(), vec![AccountMeta::new_readonly(*program_id, false)], diff --git a/programs/bpf/rust/custom_heap/Cargo.toml b/programs/bpf/rust/custom_heap/Cargo.toml index 4b4377d71c..35c292fce8 100644 --- a/programs/bpf/rust/custom_heap/Cargo.toml +++ b/programs/bpf/rust/custom_heap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-custom-heap" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = 
["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-custom-heap" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [features] default = ["custom-heap"] diff --git a/programs/bpf/rust/custom_heap/Xargo.toml b/programs/bpf/rust/custom_heap/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/custom_heap/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/dep_crate/Cargo.toml b/programs/bpf/rust/dep_crate/Cargo.toml index 72595fab79..8ea44a81a0 100644 --- a/programs/bpf/rust/dep_crate/Cargo.toml +++ b/programs/bpf/rust/dep_crate/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dep-crate" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] byteorder = { version = "1", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/dep_crate/Xargo.toml b/programs/bpf/rust/dep_crate/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/dep_crate/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/deprecated_loader/Cargo.toml b/programs/bpf/rust/deprecated_loader/Cargo.toml index 10dfec977c..0cc6c01637 100644 --- a/programs/bpf/rust/deprecated_loader/Cargo.toml +++ 
b/programs/bpf/rust/deprecated_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-deprecated-loader" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-deprecated-loader" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/deprecated_loader/Xargo.toml b/programs/bpf/rust/deprecated_loader/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/deprecated_loader/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/dup_accounts/Cargo.toml b/programs/bpf/rust/dup_accounts/Cargo.toml index 22dd6c87d6..7176c15986 100644 --- a/programs/bpf/rust/dup_accounts/Cargo.toml +++ b/programs/bpf/rust/dup_accounts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dup-accounts" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-dup-accounts" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/dup_accounts/Xargo.toml b/programs/bpf/rust/dup_accounts/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/dup_accounts/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] 
-features = [] \ No newline at end of file diff --git a/programs/bpf/rust/dup_accounts/src/lib.rs b/programs/bpf/rust/dup_accounts/src/lib.rs index bc831cfaa6..25c1f8bd26 100644 --- a/programs/bpf/rust/dup_accounts/src/lib.rs +++ b/programs/bpf/rust/dup_accounts/src/lib.rs @@ -2,46 +2,92 @@ extern crate solana_program; use solana_program::{ - account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, - program_error::ProgramError, pubkey::Pubkey, + account_info::AccountInfo, + entrypoint, + entrypoint::ProgramResult, + instruction::{AccountMeta, Instruction}, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, }; entrypoint!(process_instruction); fn process_instruction( - _program_id: &Pubkey, + program_id: &Pubkey, accounts: &[AccountInfo], instruction_data: &[u8], ) -> ProgramResult { match instruction_data[0] { 1 => { - msg!("modify first account data"); + msg!("modify account (2) data"); accounts[2].data.borrow_mut()[0] = 1; } 2 => { - msg!("modify first account data"); + msg!("modify account (3) data"); accounts[3].data.borrow_mut()[0] = 2; } 3 => { - msg!("modify both account data"); + msg!("modify account (2,3) data"); accounts[2].data.borrow_mut()[0] += 1; accounts[3].data.borrow_mut()[0] += 2; } 4 => { - msg!("modify first account lamports"); + msg!("modify account (1,2) lamports"); **accounts[1].lamports.borrow_mut() -= 1; **accounts[2].lamports.borrow_mut() += 1; } 5 => { - msg!("modify first account lamports"); + msg!("modify account (1,3) lamports"); **accounts[1].lamports.borrow_mut() -= 2; **accounts[3].lamports.borrow_mut() += 2; } 6 => { - msg!("modify both account lamports"); + msg!("modify account (1,2,3) lamports"); **accounts[1].lamports.borrow_mut() -= 3; **accounts[2].lamports.borrow_mut() += 1; **accounts[3].lamports.borrow_mut() += 2; } + 7 => { + msg!("check account (0,1,2,3) privs"); + assert!(accounts[0].is_signer); + assert!(!accounts[1].is_signer); + assert!(accounts[2].is_signer); + 
assert!(accounts[3].is_signer); + + assert!(accounts[0].is_writable); + assert!(accounts[1].is_writable); + assert!(accounts[2].is_writable); + assert!(accounts[3].is_writable); + + if accounts.len() > 4 { + let instruction = Instruction::new_with_bytes( + *program_id, + &[7], + vec![ + AccountMeta::new(*accounts[0].key, true), + AccountMeta::new(*accounts[1].key, false), + AccountMeta::new(*accounts[2].key, false), + AccountMeta::new_readonly(*accounts[3].key, true), + ], + ); + invoke(&instruction, &accounts)?; + + let instruction = Instruction::new_with_bytes( + *program_id, + &[3], + vec![ + AccountMeta::new(*accounts[0].key, true), + AccountMeta::new(*accounts[1].key, false), + AccountMeta::new(*accounts[2].key, false), + AccountMeta::new(*accounts[3].key, false), + ], + ); + invoke(&instruction, &accounts)?; + assert_eq!(accounts[2].try_borrow_mut_data()?[0], 3); + assert_eq!(accounts[3].try_borrow_mut_data()?[0], 3); + } + } _ => { msg!("Unrecognized command"); return Err(ProgramError::InvalidArgument); diff --git a/programs/bpf/rust/error_handling/Cargo.toml b/programs/bpf/rust/error_handling/Cargo.toml index 08f8069431..848988b100 100644 --- a/programs/bpf/rust/error_handling/Cargo.toml +++ b/programs/bpf/rust/error_handling/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-error-handling" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] num-derive = "0.2" num-traits = "0.2" -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } thiserror = "1.0" [lib] diff --git a/programs/bpf/rust/error_handling/Xargo.toml b/programs/bpf/rust/error_handling/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/error_handling/Xargo.toml +++ /dev/null 
@@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/external_spend/Cargo.toml b/programs/bpf/rust/external_spend/Cargo.toml index 1f791e53e0..22be155680 100644 --- a/programs/bpf/rust/external_spend/Cargo.toml +++ b/programs/bpf/rust/external_spend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-external-spend" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-external-spend" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/external_spend/Xargo.toml b/programs/bpf/rust/external_spend/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/external_spend/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/ristretto/Cargo.toml b/programs/bpf/rust/finalize/Cargo.toml similarity index 67% rename from programs/bpf/rust/ristretto/Cargo.toml rename to programs/bpf/rust/finalize/Cargo.toml index 5baf2c7649..21b185e1a1 100644 --- a/programs/bpf/rust/ristretto/Cargo.toml +++ b/programs/bpf/rust/finalize/Cargo.toml @@ -1,18 +1,16 @@ [package] -name = "solana-bpf-rust-ristretto" -version = "1.5.19" +name = "solana-bpf-rust-finalize" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" -documentation = "https://docs.rs/solana-bpf-rust-ristretto" +documentation = 
"https://docs.rs/solana-bpf-rust-finalize" edition = "2018" [dependencies] -curve25519-dalek = "3" -getrandom = { version = "0.1.14", features = ["dummy"] } -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/finalize/src/lib.rs b/programs/bpf/rust/finalize/src/lib.rs new file mode 100644 index 0000000000..50e947afdd --- /dev/null +++ b/programs/bpf/rust/finalize/src/lib.rs @@ -0,0 +1,25 @@ +//! @brief Example Rust-based BPF sanity program that finalizes a BPF program + +#![allow(unreachable_code)] + +extern crate solana_program; +use solana_program::{ + account_info::AccountInfo, bpf_loader, entrypoint, entrypoint::ProgramResult, + loader_instruction, msg, program::invoke, pubkey::Pubkey, +}; + +entrypoint!(process_instruction); +fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Finalize a program"); + invoke( + &loader_instruction::finalize(&accounts[0].key.clone(), &bpf_loader::id()), + accounts, + )?; + msg!("check executable"); + assert!(accounts[0].executable); + Ok(()) +} diff --git a/programs/bpf/rust/instruction_introspection/Cargo.toml b/programs/bpf/rust/instruction_introspection/Cargo.toml index 41265492c1..a4f04e06e6 100644 --- a/programs/bpf/rust/instruction_introspection/Cargo.toml +++ b/programs/bpf/rust/instruction_introspection/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-instruction-introspection" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-instruction-introspection" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program 
= { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/instruction_introspection/Xargo.toml b/programs/bpf/rust/instruction_introspection/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/instruction_introspection/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/invoke/Cargo.toml b/programs/bpf/rust/invoke/Cargo.toml index ada9348dba..9652de7c0d 100644 --- a/programs/bpf/rust/invoke/Cargo.toml +++ b/programs/bpf/rust/invoke/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2018" [dependencies] solana-bpf-rust-invoked = { path = "../invoked", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke/Xargo.toml b/programs/bpf/rust/invoke/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/invoke/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/invoke/src/lib.rs b/programs/bpf/rust/invoke/src/lib.rs index e5b119b794..3bee6378b0 100644 --- a/programs/bpf/rust/invoke/src/lib.rs +++ b/programs/bpf/rust/invoke/src/lib.rs @@ -29,20 +29,54 @@ const TEST_INSTRUCTION_META_TOO_LARGE: u8 = 10; const TEST_RETURN_ERROR: u8 = 11; const TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 12; const TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 13; -const TEST_WRITE_DEESCALATION: u8 = 14; +const 
TEST_WRITABLE_DEESCALATION_WRITABLE: u8 = 14; +const TEST_NESTED_INVOKE_TOO_DEEP: u8 = 15; -// const MINT_INDEX: usize = 0; +// const MINT_INDEX: usize = 0; // unused placeholder const ARGUMENT_INDEX: usize = 1; const INVOKED_PROGRAM_INDEX: usize = 2; const INVOKED_ARGUMENT_INDEX: usize = 3; const INVOKED_PROGRAM_DUP_INDEX: usize = 4; -// const ARGUMENT_DUP_INDEX: usize = 5; +// const ARGUMENT_DUP_INDEX: usize = 5; unused placeholder const DERIVED_KEY1_INDEX: usize = 6; const DERIVED_KEY2_INDEX: usize = 7; const DERIVED_KEY3_INDEX: usize = 8; const SYSTEM_PROGRAM_INDEX: usize = 9; const FROM_INDEX: usize = 10; +fn do_nested_invokes(num_nested_invokes: u64, accounts: &[AccountInfo]) -> ProgramResult { + assert!(accounts[ARGUMENT_INDEX].is_signer); + + let pre_argument_lamports = accounts[ARGUMENT_INDEX].lamports(); + let pre_invoke_argument_lamports = accounts[INVOKED_ARGUMENT_INDEX].lamports(); + **accounts[ARGUMENT_INDEX].lamports.borrow_mut() -= 5; + **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() += 5; + + msg!("First invoke"); + let instruction = create_instruction( + *accounts[INVOKED_PROGRAM_INDEX].key, + &[ + (accounts[ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_PROGRAM_INDEX].key, false, false), + ], + vec![NESTED_INVOKE, num_nested_invokes as u8], + ); + invoke(&instruction, accounts)?; + msg!("2nd invoke from first program"); + invoke(&instruction, accounts)?; + + assert_eq!( + accounts[ARGUMENT_INDEX].lamports(), + pre_argument_lamports - 5 + (2 * num_nested_invokes) + ); + assert_eq!( + accounts[INVOKED_ARGUMENT_INDEX].lamports(), + pre_invoke_argument_lamports + 5 - (2 * num_nested_invokes) + ); + Ok(()) +} + entrypoint!(process_instruction); fn process_instruction( program_id: &Pubkey, @@ -227,8 +261,10 @@ fn process_instruction( )?, accounts[DERIVED_KEY1_INDEX].key ); + let not_native_program_id = Pubkey::new_from_array([6u8; 32]); + 
assert!(!not_native_program_id.is_native_program_id()); assert_eq!( - Pubkey::create_program_address(&[b"You pass butter"], &Pubkey::default()) + Pubkey::create_program_address(&[b"You pass butter"], ¬_native_program_id) .unwrap_err(), PubkeyError::InvalidSeeds ); @@ -240,8 +276,10 @@ fn process_instruction( Pubkey::try_find_program_address(&[b"You pass butter"], program_id).unwrap(); assert_eq!(&address, accounts[DERIVED_KEY1_INDEX].key); assert_eq!(bump_seed, bump_seed1); + let not_native_program_id = Pubkey::new_from_array([6u8; 32]); + assert!(!not_native_program_id.is_native_program_id()); assert_eq!( - Pubkey::create_program_address(&[b"You pass butter"], &Pubkey::default()) + Pubkey::create_program_address(&[b"You pass butter"], ¬_native_program_id) .unwrap_err(), PubkeyError::InvalidSeeds ); @@ -282,31 +320,7 @@ fn process_instruction( msg!("Test nested invoke"); { - assert!(accounts[ARGUMENT_INDEX].is_signer); - - **accounts[ARGUMENT_INDEX].lamports.borrow_mut() -= 5; - **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() += 5; - - msg!("First invoke"); - let instruction = create_instruction( - *accounts[INVOKED_PROGRAM_INDEX].key, - &[ - (accounts[ARGUMENT_INDEX].key, true, true), - (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), - (accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false), - (accounts[INVOKED_PROGRAM_DUP_INDEX].key, false, false), - ], - vec![NESTED_INVOKE], - ); - invoke(&instruction, accounts)?; - msg!("2nd invoke from first program"); - invoke(&instruction, accounts)?; - - assert_eq!(accounts[ARGUMENT_INDEX].lamports(), 42 - 5 + 1 + 1 + 1 + 1); - assert_eq!( - accounts[INVOKED_ARGUMENT_INDEX].lamports(), - 10 + 5 - 1 - 1 - 1 - 1 - ); + do_nested_invokes(4, accounts)?; } msg!("Test privilege deescalation"); @@ -354,6 +368,32 @@ fn process_instruction( assert_eq!(data[i as usize], 42); } } + + msg!("Create account and init data"); + { + let from_lamports = accounts[FROM_INDEX].lamports(); + let to_lamports = 
accounts[DERIVED_KEY2_INDEX].lamports(); + + let instruction = create_instruction( + *accounts[INVOKED_PROGRAM_INDEX].key, + &[ + (accounts[FROM_INDEX].key, true, true), + (accounts[DERIVED_KEY2_INDEX].key, true, false), + (accounts[SYSTEM_PROGRAM_INDEX].key, false, false), + ], + vec![CREATE_AND_INIT, bump_seed2], + ); + invoke(&instruction, accounts)?; + + assert_eq!(accounts[FROM_INDEX].lamports(), from_lamports - 1); + assert_eq!(accounts[DERIVED_KEY2_INDEX].lamports(), to_lamports + 1); + let data = accounts[DERIVED_KEY2_INDEX].try_borrow_mut_data()?; + assert_eq!(data[0], 0x0e); + assert_eq!(data[MAX_PERMITTED_DATA_INCREASE - 1], 0x0f); + for i in 1..20 { + assert_eq!(data[i], i as u8); + } + } } TEST_PRIVILEGE_ESCALATION_SIGNER => { msg!("Test privilege escalation signer"); @@ -557,14 +597,27 @@ fn process_instruction( ); invoke(&invoked_instruction, accounts)?; } - TEST_WRITE_DEESCALATION => { - msg!("Test writable deescalation"); + TEST_WRITABLE_DEESCALATION_WRITABLE => { + msg!("Test writable deescalation writable"); + const NUM_BYTES: usize = 10; + let mut buffer = [0; NUM_BYTES]; + buffer + .copy_from_slice(&accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES]); + let instruction = create_instruction( *accounts[INVOKED_PROGRAM_INDEX].key, &[(accounts[INVOKED_ARGUMENT_INDEX].key, false, false)], - vec![WRITE_ACCOUNT, 10], + vec![WRITE_ACCOUNT, NUM_BYTES as u8], ); let _ = invoke(&instruction, accounts); + + assert_eq!( + buffer, + accounts[INVOKED_ARGUMENT_INDEX].data.borrow_mut()[..NUM_BYTES] + ); + } + TEST_NESTED_INVOKE_TOO_DEEP => { + let _ = do_nested_invokes(5, accounts); } _ => panic!(), } diff --git a/programs/bpf/rust/invoke_and_error/Cargo.toml b/programs/bpf/rust/invoke_and_error/Cargo.toml index 62b7da6749..5fc73da0c8 100644 --- a/programs/bpf/rust/invoke_and_error/Cargo.toml +++ b/programs/bpf/rust/invoke_and_error/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-error" -version = "1.5.19" +version = 
"1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-error" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_error/Xargo.toml b/programs/bpf/rust/invoke_and_error/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/invoke_and_error/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/invoke_and_ok/Cargo.toml b/programs/bpf/rust/invoke_and_ok/Cargo.toml index ea0a00781a..d37556453e 100644 --- a/programs/bpf/rust/invoke_and_ok/Cargo.toml +++ b/programs/bpf/rust/invoke_and_ok/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-ok" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-ok" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_ok/Xargo.toml b/programs/bpf/rust/invoke_and_ok/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/invoke_and_ok/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/invoke_and_return/Cargo.toml b/programs/bpf/rust/invoke_and_return/Cargo.toml 
index e78bf3d8c5..e8e67752d1 100644 --- a/programs/bpf/rust/invoke_and_return/Cargo.toml +++ b/programs/bpf/rust/invoke_and_return/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-return" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-return" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoked/Cargo.toml b/programs/bpf/rust/invoked/Cargo.toml index 3df5930497..4d66e1b888 100644 --- a/programs/bpf/rust/invoked/Cargo.toml +++ b/programs/bpf/rust/invoked/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoked" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoked" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [features] default = ["program"] diff --git a/programs/bpf/rust/invoked/Xargo.toml b/programs/bpf/rust/invoked/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/invoked/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/invoked/src/instruction.rs b/programs/bpf/rust/invoked/src/instruction.rs index 770ea08619..a6c16dc8cc 100644 --- a/programs/bpf/rust/invoked/src/instruction.rs +++ b/programs/bpf/rust/invoked/src/instruction.rs @@ -17,6 
+17,7 @@ pub const VERIFY_PRIVILEGE_DEESCALATION: u8 = 8; pub const VERIFY_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 9; pub const VERIFY_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 10; pub const WRITE_ACCOUNT: u8 = 11; +pub const CREATE_AND_INIT: u8 = 12; pub fn create_instruction( program_id: Pubkey, diff --git a/programs/bpf/rust/invoked/src/processor.rs b/programs/bpf/rust/invoked/src/processor.rs index 120160fd3a..892ba90e63 100644 --- a/programs/bpf/rust/invoked/src/processor.rs +++ b/programs/bpf/rust/invoked/src/processor.rs @@ -6,11 +6,12 @@ use crate::instruction::*; use solana_program::{ account_info::AccountInfo, bpf_loader, entrypoint, - entrypoint::ProgramResult, + entrypoint::{ProgramResult, MAX_PERMITTED_DATA_INCREASE}, msg, program::{invoke, invoke_signed}, program_error::ProgramError, pubkey::Pubkey, + system_instruction, }; entrypoint!(process_instruction); @@ -199,24 +200,26 @@ fn process_instruction( } NESTED_INVOKE => { msg!("nested invoke"); - const ARGUMENT_INDEX: usize = 0; const INVOKED_ARGUMENT_INDEX: usize = 1; - const INVOKED_PROGRAM_INDEX: usize = 3; + const INVOKED_PROGRAM_INDEX: usize = 2; assert!(accounts[INVOKED_ARGUMENT_INDEX].is_signer); + assert!(instruction_data.len() > 1); **accounts[INVOKED_ARGUMENT_INDEX].lamports.borrow_mut() -= 1; **accounts[ARGUMENT_INDEX].lamports.borrow_mut() += 1; - if accounts.len() > 2 { + let remaining_invokes = instruction_data[1]; + if remaining_invokes > 1 { msg!("Invoke again"); let invoked_instruction = create_instruction( *accounts[INVOKED_PROGRAM_INDEX].key, &[ (accounts[ARGUMENT_INDEX].key, true, true), (accounts[INVOKED_ARGUMENT_INDEX].key, true, true), + (accounts[INVOKED_PROGRAM_INDEX].key, false, false), ], - vec![NESTED_INVOKE], + vec![NESTED_INVOKE, remaining_invokes - 1], ); invoke(&invoked_instruction, accounts)?; } else { @@ -231,8 +234,56 @@ fn process_instruction( } WRITE_ACCOUNT => { msg!("write account"); + const ARGUMENT_INDEX: usize = 0; + for i in 
0..instruction_data[1] { - accounts[0].data.borrow_mut()[i as usize] = instruction_data[1]; + accounts[ARGUMENT_INDEX].data.borrow_mut()[i as usize] = instruction_data[1]; + } + } + CREATE_AND_INIT => { + msg!("Create and init data"); + { + const FROM_INDEX: usize = 0; + const DERIVED_KEY2_INDEX: usize = 1; + + let from_lamports = accounts[FROM_INDEX].lamports(); + let to_lamports = accounts[DERIVED_KEY2_INDEX].lamports(); + assert_eq!(accounts[DERIVED_KEY2_INDEX].data_len(), 0); + assert!(solana_program::system_program::check_id( + accounts[DERIVED_KEY2_INDEX].owner + )); + + let bump_seed2 = instruction_data[1]; + let instruction = system_instruction::create_account( + accounts[FROM_INDEX].key, + accounts[DERIVED_KEY2_INDEX].key, + 1, + MAX_PERMITTED_DATA_INCREASE as u64, + program_id, + ); + invoke_signed( + &instruction, + accounts, + &[&[b"Lil'", b"Bits", &[bump_seed2]]], + )?; + + assert_eq!(accounts[FROM_INDEX].lamports(), from_lamports - 1); + assert_eq!(accounts[DERIVED_KEY2_INDEX].lamports(), to_lamports + 1); + assert_eq!(program_id, accounts[DERIVED_KEY2_INDEX].owner); + assert_eq!( + accounts[DERIVED_KEY2_INDEX].data_len(), + MAX_PERMITTED_DATA_INCREASE + ); + let mut data = accounts[DERIVED_KEY2_INDEX].try_borrow_mut_data()?; + assert_eq!(data[0], 0); + data[0] = 0x0e; + assert_eq!(data[0], 0x0e); + assert_eq!(data[MAX_PERMITTED_DATA_INCREASE - 1], 0); + data[MAX_PERMITTED_DATA_INCREASE - 1] = 0x0f; + assert_eq!(data[MAX_PERMITTED_DATA_INCREASE - 1], 0x0f); + for i in 1..20 { + data[i] = i as u8; + } } } _ => panic!(), diff --git a/programs/bpf/rust/iter/Cargo.toml b/programs/bpf/rust/iter/Cargo.toml index eb6a1d3d0a..1e59b4d1ea 100644 --- a/programs/bpf/rust/iter/Cargo.toml +++ b/programs/bpf/rust/iter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-iter" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ 
-10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-iter" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/iter/Xargo.toml b/programs/bpf/rust/iter/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/iter/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/many_args/Cargo.toml b/programs/bpf/rust/many_args/Cargo.toml index 10f808a545..418c145355 100644 --- a/programs/bpf/rust/many_args/Cargo.toml +++ b/programs/bpf/rust/many_args/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } -solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } +solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/many_args/Xargo.toml b/programs/bpf/rust/many_args/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/many_args/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/many_args_dep/Cargo.toml b/programs/bpf/rust/many_args_dep/Cargo.toml index ce2f804c3a..418832310e 100644 --- 
a/programs/bpf/rust/many_args_dep/Cargo.toml +++ b/programs/bpf/rust/many_args_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args-dep" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/many_args_dep/Xargo.toml b/programs/bpf/rust/many_args_dep/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/many_args_dep/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/mem/Cargo.toml b/programs/bpf/rust/mem/Cargo.toml index 0a70eeb756..662ae85969 100644 --- a/programs/bpf/rust/mem/Cargo.toml +++ b/programs/bpf/rust/mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-mem" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-mem" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/mem/Xargo.toml b/programs/bpf/rust/mem/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/mem/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline 
at end of file diff --git a/programs/bpf/rust/mem/src/lib.rs b/programs/bpf/rust/mem/src/lib.rs index 860ff16a24..3817010ad2 100644 --- a/programs/bpf/rust/mem/src/lib.rs +++ b/programs/bpf/rust/mem/src/lib.rs @@ -1,10 +1,10 @@ //! @brief Test builtin mem functions #![cfg(target_arch = "bpf")] -#![feature(compiler_builtins_lib)] +#![feature(rustc_private)] extern crate compiler_builtins; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, info}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS}; #[no_mangle] pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { @@ -58,6 +58,9 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; compiler_builtins::mem::memmove(&mut buf[0] as *mut u8, &mut buf[9] as *mut u8, 9); assert_eq!(buf[..9], buf[9..]); + let buf = &mut [0_u8, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + compiler_builtins::mem::memmove(&mut buf[1] as *mut u8, &mut buf[0] as *mut u8, 9); + assert_eq!(&mut [0_u8, 0, 1, 2, 3, 4, 5, 6, 7, 8], buf); let buf = &mut [1_u8, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; compiler_builtins::mem::memmove(&mut buf[9] as *mut u8, &mut buf[0] as *mut u8, 9); assert_eq!(buf[..9], buf[9..]); diff --git a/programs/bpf/rust/noop/Cargo.toml b/programs/bpf/rust/noop/Cargo.toml index 7cccec1662..efc4aec80c 100644 --- a/programs/bpf/rust/noop/Cargo.toml +++ b/programs/bpf/rust/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-noop" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-noop" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git 
a/programs/bpf/rust/noop/Xargo.toml b/programs/bpf/rust/noop/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/noop/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/panic/Cargo.toml b/programs/bpf/rust/panic/Cargo.toml index 2ea8e94b54..9781d378e4 100644 --- a/programs/bpf/rust/panic/Cargo.toml +++ b/programs/bpf/rust/panic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-panic" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-panic" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [features] default = ["custom-panic"] diff --git a/programs/bpf/rust/panic/Xargo.toml b/programs/bpf/rust/panic/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/panic/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/param_passing/Cargo.toml b/programs/bpf/rust/param_passing/Cargo.toml index 4ccb0c610b..91280bb873 100644 --- a/programs/bpf/rust/param_passing/Cargo.toml +++ b/programs/bpf/rust/param_passing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" 
} -solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } +solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/param_passing/Xargo.toml b/programs/bpf/rust/param_passing/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/param_passing/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/param_passing_dep/Cargo.toml b/programs/bpf/rust/param_passing_dep/Cargo.toml index 724366df56..e4571acf38 100644 --- a/programs/bpf/rust/param_passing_dep/Cargo.toml +++ b/programs/bpf/rust/param_passing_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing-dep" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing-dep" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/param_passing_dep/Xargo.toml b/programs/bpf/rust/param_passing_dep/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/param_passing_dep/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/rand/Cargo.toml b/programs/bpf/rust/rand/Cargo.toml index 882194242b..c5a10e546d 100644 --- a/programs/bpf/rust/rand/Cargo.toml +++ b/programs/bpf/rust/rand/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "solana-bpf-rust-rand" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2018" [dependencies] getrandom = { version = "0.1.14", features = ["dummy"] } rand = "0.7" -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/rand/Xargo.toml b/programs/bpf/rust/rand/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/rand/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/ristretto/Xargo.toml b/programs/bpf/rust/ristretto/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/ristretto/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/ristretto/src/lib.rs b/programs/bpf/rust/ristretto/src/lib.rs deleted file mode 100644 index 45167fb277..0000000000 --- a/programs/bpf/rust/ristretto/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -//! 
@brief Example Rust-based BPF program that performs a ristretto multiply - -pub mod ristretto; - -use crate::ristretto::ristretto_mul; -use curve25519_dalek::{constants::RISTRETTO_BASEPOINT_POINT, scalar::Scalar}; -use solana_program::{ - account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey, -}; - -fn test_ristretto_mul() -> ProgramResult { - let point = RISTRETTO_BASEPOINT_POINT; - let scalar = Scalar::zero(); - let result = ristretto_mul(&point, &scalar)?; - assert_ne!(point, result); - - let point = RISTRETTO_BASEPOINT_POINT; - let scalar = Scalar::one(); - let result = ristretto_mul(&point, &scalar)?; - assert_eq!(point, result); - - Ok(()) -} - -entrypoint!(process_instruction); -fn process_instruction( - _program_id: &Pubkey, - _accounts: &[AccountInfo], - _instruction_data: &[u8], -) -> ProgramResult { - msg!("Ristretto multiply"); - - test_ristretto_mul()?; - - Ok(()) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_ristretto() { - test_ristretto_mul().unwrap(); - } -} diff --git a/programs/bpf/rust/ristretto/src/ristretto.rs b/programs/bpf/rust/ristretto/src/ristretto.rs deleted file mode 100644 index 9286e09798..0000000000 --- a/programs/bpf/rust/ristretto/src/ristretto.rs +++ /dev/null @@ -1,44 +0,0 @@ -use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar}; -use solana_program::program_error::ProgramError; - -/// Multiply a ristretto point with a scalar -/// -/// @param point - Ristretto point -/// @param scalar - Scalar to mulitply against -/// @return - result of the multiplication -#[inline] -pub fn ristretto_mul( - point: &RistrettoPoint, - scalar: &Scalar, -) -> Result { - // Perform the calculation inline, calling this from within a program is - // not supported - #[cfg(not(target_arch = "bpf"))] - { - Ok(point * scalar) - } - // Call via a system call to perform the calculation - #[cfg(target_arch = "bpf")] - { - extern "C" { - fn sol_ristretto_mul( - point_addr: *const u8, - 
scalar_addr: *const u8, - result_addr: *mut u8, - ) -> u64; - } - - let mut result = RistrettoPoint::default(); - let status = unsafe { - sol_ristretto_mul( - point as *const _ as *const u8, - scalar as *const _ as *const u8, - &mut result as *const _ as *mut u8, - ) - }; - match status { - solana_program::entrypoint::SUCCESS => Ok(result), - _ => Err(status.into()), - } - } -} diff --git a/programs/bpf/rust/ro_modify/Cargo.toml b/programs/bpf/rust/ro_modify/Cargo.toml index 54290cf592..7602acf1cb 100644 --- a/programs/bpf/rust/ro_modify/Cargo.toml +++ b/programs/bpf/rust/ro_modify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-ro-modify" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/ro_modify/Xargo.toml b/programs/bpf/rust/ro_modify/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/ro_modify/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/sanity/Cargo.toml b/programs/bpf/rust/sanity/Cargo.toml index c45471aded..cbe113ddb1 100644 --- a/programs/bpf/rust/sanity/Cargo.toml +++ b/programs/bpf/rust/sanity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sanity" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-sanity" edition = "2018" 
[dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sanity/Xargo.toml b/programs/bpf/rust/sanity/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/sanity/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/sysval/Cargo.toml b/programs/bpf/rust/sha/Cargo.toml similarity index 78% rename from programs/bpf/rust/sysval/Cargo.toml rename to programs/bpf/rust/sha/Cargo.toml index 982f45570e..8671219817 100644 --- a/programs/bpf/rust/sysval/Cargo.toml +++ b/programs/bpf/rust/sha/Cargo.toml @@ -1,16 +1,16 @@ [package] -name = "solana-bpf-rust-sysval" -version = "1.5.19" +name = "solana-bpf-rust-sha" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" -documentation = "https://docs.rs/solana-bpf-rust-sysval" +documentation = "https://docs.rs/solana-bpf-rust-sha" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sha/src/lib.rs b/programs/bpf/rust/sha/src/lib.rs new file mode 100644 index 0000000000..6ee55cc9ab --- /dev/null +++ b/programs/bpf/rust/sha/src/lib.rs @@ -0,0 +1,43 @@ +//! 
@brief SHA Syscall test + +extern crate solana_program; +use solana_program::{custom_panic_default, msg}; + +fn test_sha256_hasher() { + use solana_program::hash::{hashv, Hasher}; + let vals = &["Gaggablaghblagh!".as_ref(), "flurbos".as_ref()]; + let mut hasher = Hasher::default(); + hasher.hashv(vals); + assert_eq!(hashv(vals), hasher.result()); +} + +fn test_keccak256_hasher() { + use solana_program::keccak::{hashv, Hasher}; + let vals = &["Gaggablaghblagh!".as_ref(), "flurbos".as_ref()]; + let mut hasher = Hasher::default(); + hasher.hashv(vals); + assert_eq!(hashv(vals), hasher.result()); +} + +#[no_mangle] +pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { + msg!("sha"); + + test_sha256_hasher(); + test_keccak256_hasher(); + + 0 +} + +custom_panic_default!(); + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_sha() { + test_sha256_hasher(); + test_keccak256_hasher(); + } +} diff --git a/programs/bpf/rust/sha256/Xargo.toml b/programs/bpf/rust/sha256/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/sha256/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/sha256/src/lib.rs b/programs/bpf/rust/sha256/src/lib.rs deleted file mode 100644 index fc5c55b3e6..0000000000 --- a/programs/bpf/rust/sha256/src/lib.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! 
@brief SHA256 Syscall test - -extern crate solana_program; -use solana_program::{ - custom_panic_default, - hash::{hashv, Hasher}, - msg, -}; - -fn test_hasher() { - let vals = &["Gaggablaghblagh!".as_ref(), "flurbos".as_ref()]; - let mut hasher = Hasher::default(); - hasher.hashv(vals); - assert_eq!(hashv(vals), hasher.result()); -} - -#[no_mangle] -pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { - msg!("sha256"); - - test_hasher(); - - 0 -} - -custom_panic_default!(); - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_sha256() { - test_hasher(); - } -} diff --git a/programs/bpf/rust/spoof1/Cargo.toml b/programs/bpf/rust/spoof1/Cargo.toml index 688b837fb2..6fdffd04f3 100644 --- a/programs/bpf/rust/spoof1/Cargo.toml +++ b/programs/bpf/rust/spoof1/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-spoof1" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1/Xargo.toml b/programs/bpf/rust/spoof1/Xargo.toml deleted file mode 100644 index 475fb71ed1..0000000000 --- a/programs/bpf/rust/spoof1/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] diff --git a/programs/bpf/rust/spoof1/src/lib.rs b/programs/bpf/rust/spoof1/src/lib.rs index 8f2e9bf170..d4b00c602a 100644 --- a/programs/bpf/rust/spoof1/src/lib.rs +++ b/programs/bpf/rust/spoof1/src/lib.rs @@ -36,7 +36,7 @@ fn process_instruction( AccountMeta::new(*target.key, false), AccountMeta::new(*me.key, false), ]; - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( 
system_program::id(), &SystemInstruction::Transfer { lamports: 1 }, account_metas, diff --git a/programs/bpf/rust/spoof1_system/Cargo.toml b/programs/bpf/rust/spoof1_system/Cargo.toml index 14709529bf..8d28c59f15 100644 --- a/programs/bpf/rust/spoof1_system/Cargo.toml +++ b/programs/bpf/rust/spoof1_system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-spoof1-system" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1-system" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1_system/Xargo.toml b/programs/bpf/rust/spoof1_system/Xargo.toml deleted file mode 100644 index 475fb71ed1..0000000000 --- a/programs/bpf/rust/spoof1_system/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] diff --git a/programs/bpf/rust/sysval/Xargo.toml b/programs/bpf/rust/sysval/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/sysval/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/sysval/src/lib.rs b/programs/bpf/rust/sysval/src/lib.rs deleted file mode 100644 index 062a6fc372..0000000000 --- a/programs/bpf/rust/sysval/src/lib.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! 
@brief Example Rust-based BPF program that tests sysval use - -extern crate solana_program; -use solana_program::{ - account_info::AccountInfo, - clock::DEFAULT_SLOTS_PER_EPOCH, - entrypoint, - entrypoint::ProgramResult, - msg, - pubkey::Pubkey, - rent, - sysvar::{ - self, clock::Clock, fees::Fees, rent::Rent, slot_hashes::SlotHashes, - stake_history::StakeHistory, Sysvar, - }, -}; - -entrypoint!(process_instruction); -#[allow(clippy::unnecessary_wraps)] -fn process_instruction( - _program_id: &Pubkey, - accounts: &[AccountInfo], - _instruction_data: &[u8], -) -> ProgramResult { - // Clock - msg!("Clock identifier:"); - sysvar::clock::id().log(); - let clock = Clock::from_account_info(&accounts[2]).expect("clock"); - assert_eq!(clock.slot, DEFAULT_SLOTS_PER_EPOCH + 1); - - // Fees - msg!("Fees identifier:"); - sysvar::fees::id().log(); - let fees = Fees::from_account_info(&accounts[3]).expect("fees"); - let fee_calculator = fees.fee_calculator; - assert_eq!(fee_calculator.lamports_per_signature, 0); - - // Slot Hashes - msg!("SlotHashes identifier:"); - sysvar::slot_hashes::id().log(); - let slot_hashes = SlotHashes::from_account_info(&accounts[4]).expect("slot_hashes"); - assert!(slot_hashes.len() >= 1); - - // Stake History - msg!("StakeHistory identifier:"); - sysvar::stake_history::id().log(); - let stake_history = StakeHistory::from_account_info(&accounts[5]).expect("stake_history"); - assert!(stake_history.len() >= 1); - - let rent = Rent::from_account_info(&accounts[6]).unwrap(); - assert_eq!( - rent.due( - rent::DEFAULT_LAMPORTS_PER_BYTE_YEAR * rent::DEFAULT_EXEMPTION_THRESHOLD as u64, - 1, - 1.0 - ), - (0, true) - ); - - Ok(()) -} diff --git a/programs/bpf/rust/sha256/Cargo.toml b/programs/bpf/rust/sysvar/Cargo.toml similarity index 78% rename from programs/bpf/rust/sha256/Cargo.toml rename to programs/bpf/rust/sysvar/Cargo.toml index ff388134a5..f6ac741430 100644 --- a/programs/bpf/rust/sha256/Cargo.toml +++ b/programs/bpf/rust/sysvar/Cargo.toml @@ -1,16 
+1,16 @@ [package] -name = "solana-bpf-rust-sha256" -version = "1.5.19" +name = "solana-bpf-rust-sysvar" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" -documentation = "https://docs.rs/solana-bpf-rust-sha256" +documentation = "https://docs.rs/solana-bpf-rust-sysvar" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sysvar/src/lib.rs b/programs/bpf/rust/sysvar/src/lib.rs new file mode 100644 index 0000000000..26ec6b6712 --- /dev/null +++ b/programs/bpf/rust/sysvar/src/lib.rs @@ -0,0 +1,99 @@ +//! @brief Example Rust-based BPF program that tests sysvar use + +extern crate solana_program; +use solana_program::{ + account_info::AccountInfo, + entrypoint, + entrypoint::ProgramResult, + msg, + program_error::ProgramError, + pubkey::Pubkey, + sysvar::{ + self, clock::Clock, epoch_schedule::EpochSchedule, fees::Fees, instructions, + recent_blockhashes::RecentBlockhashes, rent::Rent, slot_hashes::SlotHashes, + slot_history::SlotHistory, stake_history::StakeHistory, Sysvar, + }, +}; + +entrypoint!(process_instruction); +#[allow(clippy::unnecessary_wraps)] +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + // Clock + { + msg!("Clock identifier:"); + sysvar::clock::id().log(); + let clock = Clock::from_account_info(&accounts[2]).unwrap(); + assert_ne!(clock, Clock::default()); + let got_clock = Clock::get()?; + assert_eq!(clock, got_clock); + } + + // Epoch Schedule + { + msg!("EpochSchedule identifier:"); + sysvar::epoch_schedule::id().log(); + let epoch_schedule = EpochSchedule::from_account_info(&accounts[3]).unwrap(); + 
assert_eq!(epoch_schedule, EpochSchedule::default()); + let got_epoch_schedule = EpochSchedule::get()?; + assert_eq!(epoch_schedule, got_epoch_schedule); + } + + // Fees + { + msg!("Fees identifier:"); + sysvar::fees::id().log(); + let fees = Fees::from_account_info(&accounts[4]).unwrap(); + let got_fees = Fees::get()?; + assert_eq!(fees, got_fees); + } + + // Instructions + msg!("Instructions identifier:"); + sysvar::instructions::id().log(); + let index = instructions::load_current_index(&accounts[5].try_borrow_data()?); + assert_eq!(0, index); + + // Recent Blockhashes + { + msg!("RecentBlockhashes identifier:"); + sysvar::recent_blockhashes::id().log(); + let recent_blockhashes = RecentBlockhashes::from_account_info(&accounts[6]).unwrap(); + assert_ne!(recent_blockhashes, RecentBlockhashes::default()); + } + + // Rent + { + msg!("Rent identifier:"); + sysvar::rent::id().log(); + let rent = Rent::from_account_info(&accounts[7]).unwrap(); + let got_rent = Rent::get()?; + assert_eq!(rent, got_rent); + } + + // Slot Hashes + msg!("SlotHashes identifier:"); + sysvar::slot_hashes::id().log(); + assert_eq!( + Err(ProgramError::UnsupportedSysvar), + SlotHashes::from_account_info(&accounts[8]) + ); + + // Slot History + msg!("SlotHistory identifier:"); + sysvar::slot_history::id().log(); + assert_eq!( + Err(ProgramError::UnsupportedSysvar), + SlotHistory::from_account_info(&accounts[9]) + ); + + // Stake History + msg!("StakeHistory identifier:"); + sysvar::stake_history::id().log(); + let _ = StakeHistory::from_account_info(&accounts[10]).unwrap(); + + Ok(()) +} diff --git a/programs/bpf/rust/upgradeable/Cargo.toml b/programs/bpf/rust/upgradeable/Cargo.toml index eceb7a146f..db262193c2 100644 --- a/programs/bpf/rust/upgradeable/Cargo.toml +++ b/programs/bpf/rust/upgradeable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgradeable" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana 
Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgradeable" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] name = "solana_bpf_rust_upgradeable" diff --git a/programs/bpf/rust/upgradeable/Xargo.toml b/programs/bpf/rust/upgradeable/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/upgradeable/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/rust/upgraded/Cargo.toml b/programs/bpf/rust/upgraded/Cargo.toml index 86c5fccfbb..3b6b1c7b30 100644 --- a/programs/bpf/rust/upgraded/Cargo.toml +++ b/programs/bpf/rust/upgraded/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgraded" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgraded" edition = "2018" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.5.19" } +solana-program = { path = "../../../../sdk/program", version = "=1.6.14" } [lib] name = "solana_bpf_rust_upgraded" diff --git a/programs/bpf/rust/upgraded/Xargo.toml b/programs/bpf/rust/upgraded/Xargo.toml deleted file mode 100644 index 1744f098ae..0000000000 --- a/programs/bpf/rust/upgraded/Xargo.toml +++ /dev/null @@ -1,2 +0,0 @@ -[target.bpfel-unknown-unknown.dependencies.std] -features = [] \ No newline at end of file diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 15b7b25a81..79d1cfa172 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -4,6 +4,9 @@ extern crate 
solana_bpf_loader_program; use itertools::izip; +use solana_account_decoder::parse_bpf_loader::{ + parse_bpf_upgradeable_loader, BpfUpgradeableLoaderAccountType, +}; use solana_bpf_loader_program::{ create_vm, serialization::{deserialize_parameters, serialize_parameters}, @@ -22,12 +25,11 @@ use solana_runtime::{ }, }; use solana_sdk::{ - account::Account, + account::AccountSharedData, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, client::SyncClient, clock::{DEFAULT_SLOTS_PER_EPOCH, MAX_PROCESSING_AGE}, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, - feature_set::try_find_program_address_syscall_enabled, instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError}, keyed_account::KeyedAccount, message::Message, @@ -35,14 +37,20 @@ use solana_sdk::{ pubkey::Pubkey, signature::{keypair_from_seed, Keypair, Signer}, system_instruction, - sysvar::{clock, fees, rent, slot_hashes, stake_history}, + sysvar::{ + clock, epoch_schedule, fees, instructions, recent_blockhashes, rent, slot_hashes, + slot_history, stake_history, + }, transaction::{Transaction, TransactionError}, }; use solana_transaction_status::{ token_balances::collect_token_balances, ConfirmedTransaction, InnerInstructions, TransactionStatusMeta, TransactionWithStatusMeta, UiTransactionEncoding, }; -use std::{cell::RefCell, collections::HashMap, env, fs::File, io::Read, path::PathBuf, sync::Arc}; +use std::{ + cell::RefCell, collections::HashMap, env, fs::File, io::Read, path::PathBuf, str::FromStr, + sync::Arc, +}; /// BPF program file extension const PLATFORM_FILE_EXTENSION_BPF: &str = "so"; @@ -216,7 +224,7 @@ fn run_program( let mut vm = create_vm( &loader_id, executable.as_ref(), - &mut parameter_bytes, + parameter_bytes.as_slice_mut(), parameter_accounts, &mut invoke_context, ) @@ -227,7 +235,13 @@ fn run_program( vm.execute_program_jit(&mut instruction_meter) }; assert_eq!(SUCCESS, result.unwrap()); - deserialize_parameters(&bpf_loader::id(), parameter_accounts, 
¶meter_bytes).unwrap(); + deserialize_parameters( + &bpf_loader::id(), + parameter_accounts, + parameter_bytes.as_slice(), + true, + ) + .unwrap(); if i == 1 { assert_eq!(instruction_count, vm.get_total_instruction_count()); } @@ -368,6 +382,7 @@ fn execute_transactions(bank: &Bank, txs: &[Transaction]) -> Vec vec![ solana_sdk::system_program::id(), @@ -857,6 +894,12 @@ fn test_program_bpf_invoke_sanity() { invoked_program_id.clone(), invoked_program_id.clone(), invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + solana_sdk::system_program::id(), ], }; assert_eq!(invoked_programs.len(), expected_invoked_programs.len()); @@ -879,8 +922,11 @@ fn test_program_bpf_invoke_sanity() { &invoked_argument_keypair, &from_keypair, ]; - let instruction = - Instruction::new(invoke_program_id, instruction_data, account_metas.clone()); + let instruction = Instruction::new_with_bytes( + invoke_program_id, + instruction_data, + account_metas.clone(), + ); let message = Message::new(&[instruction], Some(&mint_pubkey)); let tx = Transaction::new(&signers, message.clone(), bank.last_blockhash()); let (result, inner_instructions) = process_transaction_and_record_inner(&bank, tx); @@ -959,11 +1005,23 @@ fn test_program_bpf_invoke_sanity() { ); do_invoke_failure_test_local( - TEST_WRITE_DEESCALATION, + TEST_WRITABLE_DEESCALATION_WRITABLE, TransactionError::InstructionError(0, InstructionError::ReadonlyDataModified), &[invoked_program_id.clone()], ); + do_invoke_failure_test_local( + TEST_NESTED_INVOKE_TOO_DEEP, + TransactionError::InstructionError(0, InstructionError::CallDepth), + &[ + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + invoked_program_id.clone(), + ], + ); + // Check resulting state assert_eq!(43, bank.get_balance(&derived_key1)); @@ -978,10 +1036,10 @@ fn 
test_program_bpf_invoke_sanity() { } // Attempt to realloc into unauthorized address space - let account = Account::new(84, 0, &solana_sdk::system_program::id()); + let account = AccountSharedData::new(84, 0, &solana_sdk::system_program::id()); bank.store_account(&from_keypair.pubkey(), &account); - bank.store_account(&derived_key1, &Account::default()); - let instruction = Instruction::new( + bank.store_account(&derived_key1, &AccountSharedData::default()); + let instruction = Instruction::new_with_bytes( invoke_program_id, &[ TEST_ALLOC_ACCESS_VIOLATION, @@ -1043,11 +1101,11 @@ fn test_program_bpf_program_id_spoofing() { ); let from_pubkey = Pubkey::new_unique(); - let account = Account::new(10, 0, &solana_sdk::system_program::id()); + let account = AccountSharedData::new(10, 0, &solana_sdk::system_program::id()); bank.store_account(&from_pubkey, &account); let to_pubkey = Pubkey::new_unique(); - let account = Account::new(0, 0, &solana_sdk::system_program::id()); + let account = AccountSharedData::new(0, 0, &solana_sdk::system_program::id()); bank.store_account(&to_pubkey, &account); let account_metas = vec![ @@ -1057,7 +1115,8 @@ fn test_program_bpf_program_id_spoofing() { AccountMeta::new(to_pubkey, false), ]; - let instruction = Instruction::new(malicious_swap_pubkey, &(), account_metas.clone()); + let instruction = + Instruction::new_with_bytes(malicious_swap_pubkey, &[], account_metas.clone()); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert_eq!( result.unwrap_err().unwrap(), @@ -1097,7 +1156,7 @@ fn test_program_bpf_caller_has_access_to_cpi_program() { AccountMeta::new_readonly(caller_pubkey, false), AccountMeta::new_readonly(caller2_pubkey, false), ]; - let instruction = Instruction::new(caller_pubkey, &[1_u8], account_metas.clone()); + let instruction = Instruction::new_with_bytes(caller_pubkey, &[1], account_metas.clone()); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); 
assert_eq!( result.unwrap_err().unwrap(), @@ -1129,7 +1188,7 @@ fn test_program_bpf_ro_modify() { ); let test_keypair = Keypair::new(); - let account = Account::new(10, 0, &solana_sdk::system_program::id()); + let account = AccountSharedData::new(10, 0, &solana_sdk::system_program::id()); bank.store_account(&test_keypair.pubkey(), &account); let account_metas = vec![ @@ -1137,7 +1196,7 @@ fn test_program_bpf_ro_modify() { AccountMeta::new(test_keypair.pubkey(), true), ]; - let instruction = Instruction::new(program_pubkey, &[1_u8], account_metas.clone()); + let instruction = Instruction::new_with_bytes(program_pubkey, &[1], account_metas.clone()); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message); assert_eq!( @@ -1145,7 +1204,7 @@ fn test_program_bpf_ro_modify() { TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) ); - let instruction = Instruction::new(program_pubkey, &[3_u8], account_metas.clone()); + let instruction = Instruction::new_with_bytes(program_pubkey, &[3], account_metas.clone()); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message); assert_eq!( @@ -1153,7 +1212,7 @@ fn test_program_bpf_ro_modify() { TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) ); - let instruction = Instruction::new(program_pubkey, &[4_u8], account_metas.clone()); + let instruction = Instruction::new_with_bytes(program_pubkey, &[4], account_metas.clone()); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message); assert_eq!( @@ -1187,7 +1246,7 @@ fn test_program_bpf_call_depth() { "solana_bpf_rust_call_depth", ); - let instruction = Instruction::new( + let instruction = 
Instruction::new_with_bincode( program_id, &(BpfComputeBudget::default().max_call_depth - 1), vec![], @@ -1195,7 +1254,7 @@ fn test_program_bpf_call_depth() { let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert!(result.is_ok()); - let instruction = Instruction::new( + let instruction = Instruction::new_with_bincode( program_id, &BpfComputeBudget::default().max_call_depth, vec![], @@ -1214,10 +1273,11 @@ fn assert_instruction_count() { programs.extend_from_slice(&[ ("bpf_to_bpf", 13), ("multiple_static", 8), - ("noop", 57), + ("noop", 45), ("relative_call", 10), - ("sanity", 176), - ("sanity++", 176), + ("sanity", 175), + ("sanity++", 177), + ("sha", 694), ("struct_pass", 8), ("struct_ret", 22), ]); @@ -1226,28 +1286,33 @@ fn assert_instruction_count() { { programs.extend_from_slice(&[ ("solana_bpf_rust_128bit", 572), - ("solana_bpf_rust_alloc", 12919), + ("solana_bpf_rust_alloc", 8906), ("solana_bpf_rust_dep_crate", 2), - ("solana_bpf_rust_external_spend", 514), + ("solana_bpf_rust_external_spend", 521), ("solana_bpf_rust_iter", 724), ("solana_bpf_rust_many_args", 237), - ("solana_bpf_rust_noop", 488), - ("solana_bpf_rust_param_passing", 48), - ("solana_bpf_rust_ristretto", 19409), - ("solana_bpf_rust_sanity", 938), + ("solana_bpf_rust_noop", 495), + ("solana_bpf_rust_param_passing", 46), + ("solana_bpf_rust_sanity", 917), + ("solana_bpf_rust_sha", 29099), ]); } + let mut passed = true; + println!("\n {:30} expected actual diff", "BPF program"); for program in programs.iter() { - println!("Test program: {:?}", program.0); let program_id = solana_sdk::pubkey::new_rand(); let key = solana_sdk::pubkey::new_rand(); - let mut account = RefCell::new(Account::default()); + let mut account = RefCell::new(AccountSharedData::default()); let parameter_accounts = vec![KeyedAccount::new(&key, false, &mut account)]; let count = run_program(program.0, &program_id, ¶meter_accounts[..], &[]).unwrap(); - println!(" {} : {:?} ({:?})", program.0, 
count, program.1,); - assert!(count <= program.1); + let diff: i64 = count as i64 - program.1 as i64; + println!(" {:30} {:8} {:6} {:+4}", program.0, program.1, count, diff); + if count > program.1 { + passed = false; + } } + assert!(passed); } #[cfg(any(feature = "bpf_rust"))] @@ -1279,9 +1344,9 @@ fn test_program_bpf_instruction_introspection() { solana_sdk::sysvar::instructions::id(), false, )]; - let instruction0 = Instruction::new(program_id, &[0u8, 0u8], account_metas.clone()); - let instruction1 = Instruction::new(program_id, &[0u8, 1u8], account_metas.clone()); - let instruction2 = Instruction::new(program_id, &[0u8, 2u8], account_metas); + let instruction0 = Instruction::new_with_bytes(program_id, &[0u8, 0u8], account_metas.clone()); + let instruction1 = Instruction::new_with_bytes(program_id, &[0u8, 1u8], account_metas.clone()); + let instruction2 = Instruction::new_with_bytes(program_id, &[0u8, 2u8], account_metas); let message = Message::new( &[instruction0, instruction1, instruction2], Some(&mint_keypair.pubkey()), @@ -1294,15 +1359,17 @@ fn test_program_bpf_instruction_introspection() { solana_sdk::sysvar::instructions::id(), false, )]; - let instruction = Instruction::new(program_id, &0u8, account_metas); + let instruction = Instruction::new_with_bytes(program_id, &[0], account_metas); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert_eq!( result.unwrap_err().unwrap(), - TransactionError::InvalidAccountIndex + // sysvar write locks are demoted to read only. So this will no longer + // cause InvalidAccountIndex error. 
+ TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete), ); // No accounts, should error - let instruction = Instruction::new(program_id, &0u8, vec![]); + let instruction = Instruction::new_with_bytes(program_id, &[0], vec![]); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert!(result.is_err()); assert_eq!( @@ -1369,7 +1436,7 @@ fn test_program_bpf_test_use_latest_executor() { let message = Message::new( &[ loader_instruction::finalize(&program_keypair.pubkey(), &bpf_loader::id()), - Instruction::new(panic_id, &0u8, vec![]), + Instruction::new_with_bytes(panic_id, &[0], vec![]), ], Some(&mint_keypair.pubkey()), ); @@ -1401,7 +1468,11 @@ fn test_program_bpf_test_use_latest_executor() { // Call the noop program, should get noop not panic let message = Message::new( - &[Instruction::new(program_keypair.pubkey(), &0u8, vec![])], + &[Instruction::new_with_bytes( + program_keypair.pubkey(), + &[0], + vec![], + )], Some(&mint_keypair.pubkey()), ); assert!(bank_client @@ -1491,7 +1562,11 @@ fn test_program_bpf_test_use_latest_executor2() { // invoke program, verify not found let message = Message::new( - &[Instruction::new(program_keypair.pubkey(), &0u8, vec![])], + &[Instruction::new_with_bytes( + program_keypair.pubkey(), + &[0], + vec![], + )], Some(&mint_keypair.pubkey()), ); assert_eq!( @@ -1526,7 +1601,11 @@ fn test_program_bpf_test_use_latest_executor2() { // Call the program, should get noop, not panic let message = Message::new( - &[Instruction::new(program_keypair.pubkey(), &0u8, vec![])], + &[Instruction::new_with_bytes( + program_keypair.pubkey(), + &[0], + vec![], + )], Some(&mint_keypair.pubkey()), ); assert!(bank_client @@ -1563,7 +1642,7 @@ fn test_program_bpf_upgrade() { "solana_bpf_rust_upgradeable", ); - let mut instruction = Instruction::new( + let mut instruction = Instruction::new_with_bytes( program_id, &[0], vec![ @@ -1659,7 +1738,7 @@ fn 
test_program_bpf_upgrade_and_invoke_in_same_tx() { "solana_bpf_rust_noop", ); - let invoke_instruction = Instruction::new( + let invoke_instruction = Instruction::new_with_bytes( program_id, &[0], vec![ @@ -1747,7 +1826,7 @@ fn test_program_bpf_invoke_upgradeable_via_cpi() { "solana_bpf_rust_upgradeable", ); - let mut instruction = Instruction::new( + let mut instruction = Instruction::new_with_bytes( invoke_and_return, &[0], vec![ @@ -1848,7 +1927,8 @@ fn test_program_bpf_disguised_as_bpf_loader() { program, ); let account_metas = vec![AccountMeta::new_readonly(program_id, false)]; - let instruction = Instruction::new(bpf_loader_deprecated::id(), &1u8, account_metas); + let instruction = + Instruction::new_with_bytes(bpf_loader_deprecated::id(), &[1], account_metas); let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); assert_eq!( result.unwrap_err().unwrap(), @@ -1857,6 +1937,38 @@ fn test_program_bpf_disguised_as_bpf_loader() { } } +#[test] +#[cfg(feature = "bpf_c")] +fn test_program_bpf_c_dup() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(50); + let mut bank = Bank::new(&genesis_config); + let (name, id, entrypoint) = solana_bpf_loader_program!(); + bank.add_builtin(&name, id, entrypoint); + + let account_address = Pubkey::new_unique(); + let account = + AccountSharedData::new_data(42, &[1_u8, 2, 3], &solana_sdk::system_program::id()).unwrap(); + bank.store_account(&account_address, &account); + + let bank_client = BankClient::new(bank); + + let program_id = load_bpf_program(&bank_client, &bpf_loader::id(), &mint_keypair, "ser"); + let account_metas = vec![ + AccountMeta::new_readonly(account_address, false), + AccountMeta::new_readonly(account_address, false), + ]; + let instruction = Instruction::new_with_bytes(program_id, &[4, 5, 6, 7], account_metas); + bank_client + .send_and_confirm_instruction(&mint_keypair, instruction) + .unwrap(); +} + #[cfg(feature = "bpf_rust")] #[test] fn test_program_bpf_upgrade_via_cpi() { @@ -1894,7 +2006,7 @@ fn test_program_bpf_upgrade_via_cpi() { "solana_bpf_rust_upgradeable", ); - let mut instruction = Instruction::new( + let mut instruction = Instruction::new_with_bytes( invoke_and_return, &[0], vec![ @@ -1992,7 +2104,7 @@ fn test_program_bpf_upgrade_self_via_cpi() { "solana_bpf_rust_invoke_and_return", ); - let mut invoke_instruction = Instruction::new( + let mut invoke_instruction = Instruction::new_with_bytes( program_id, &[0], vec![ @@ -2045,6 +2157,94 @@ fn test_program_bpf_upgrade_self_via_cpi() { ); } +#[cfg(feature = "bpf_rust")] +#[test] +fn test_program_bpf_set_upgrade_authority_via_cpi() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(50); + let mut bank = Bank::new(&genesis_config); + let (name, id, entrypoint) = solana_bpf_loader_program!(); + bank.add_builtin(&name, id, entrypoint); + let (name, id, entrypoint) = solana_bpf_loader_upgradeable_program!(); + bank.add_builtin(&name, id, entrypoint); + let bank_client = BankClient::new(bank); + + // Deploy CPI invoker program + let invoke_and_return = load_bpf_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + "solana_bpf_rust_invoke_and_return", + ); + + // Deploy upgradeable program + let buffer_keypair = Keypair::new(); + let program_keypair = Keypair::new(); + let program_id = program_keypair.pubkey(); + let authority_keypair = Keypair::new(); + load_upgradeable_bpf_program( + &bank_client, + &mint_keypair, + &buffer_keypair, + &program_keypair, + &authority_keypair, + "solana_bpf_rust_upgradeable", + ); + + // Set program upgrade authority instruction to invoke via CPI + let new_upgrade_authority_key = Keypair::new().pubkey(); + let mut set_upgrade_authority_instruction = bpf_loader_upgradeable::set_upgrade_authority( + &program_id, + &authority_keypair.pubkey(), + Some(&new_upgrade_authority_key), + ); + + // Invoke set_upgrade_authority via CPI invoker program + set_upgrade_authority_instruction.program_id = invoke_and_return; + set_upgrade_authority_instruction + .accounts + .insert(0, AccountMeta::new(bpf_loader_upgradeable::id(), false)); + + let message = Message::new( + &[set_upgrade_authority_instruction], + Some(&mint_keypair.pubkey()), + ); + bank_client + .send_and_confirm_message(&[&mint_keypair, &authority_keypair], message) + .unwrap(); + + // Assert upgrade authority was changed + let program_account_data = bank_client.get_account_data(&program_id).unwrap().unwrap(); + let program_account = parse_bpf_upgradeable_loader(&program_account_data).unwrap(); + + let upgrade_authority_key = match program_account { + BpfUpgradeableLoaderAccountType::Program(ui_program) => { + let 
program_data_account_key = Pubkey::from_str(&ui_program.program_data).unwrap(); + let program_data_account_data = bank_client + .get_account_data(&program_data_account_key) + .unwrap() + .unwrap(); + let program_data_account = + parse_bpf_upgradeable_loader(&program_data_account_data).unwrap(); + + match program_data_account { + BpfUpgradeableLoaderAccountType::ProgramData(ui_program_data) => ui_program_data + .authority + .map(|a| Pubkey::from_str(&a).unwrap()), + _ => None, + } + } + _ => None, + }; + + assert_eq!(Some(new_upgrade_authority_key), upgrade_authority_key); +} + #[cfg(feature = "bpf_rust")] #[test] fn test_program_upgradeable_locks() { @@ -2104,9 +2304,9 @@ fn test_program_upgradeable_locks() { let invoke_tx = Transaction::new( &[payer_keypair], Message::new( - &[Instruction::new( + &[Instruction::new_with_bytes( program_keypair.pubkey(), - &[0u8; 0], + &[0; 0], vec![], )], Some(&payer_keypair.pubkey()), @@ -2187,7 +2387,7 @@ fn test_program_upgradeable_locks() { #[cfg(feature = "bpf_rust")] #[test] -fn test_program_bpf_syscall_feature_activation() { +fn test_program_bpf_finalize() { solana_logger::setup(); let GenesisConfigInfo { @@ -2196,29 +2396,53 @@ fn test_program_bpf_syscall_feature_activation() { .. 
} = create_genesis_config(50); let mut bank = Bank::new(&genesis_config); - bank.deactivate_feature(&try_find_program_address_syscall_enabled::id()); let (name, id, entrypoint) = solana_bpf_loader_program!(); bank.add_builtin(&name, id, entrypoint); let bank = Arc::new(bank); let bank_client = BankClient::new_shared(&bank); - let program_id = load_bpf_program( + let program_pubkey = load_bpf_program( &bank_client, &bpf_loader::id(), &mint_keypair, - "solana_bpf_rust_noop", + "solana_bpf_rust_finalize", ); - let instruction = Instruction::new(program_id, &0u8, vec![]); - let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); - assert!(result.is_ok()); - let mut bank = Bank::new_from_parent(&bank, &Pubkey::default(), 1); - bank.activate_feature(&try_find_program_address_syscall_enabled::id()); + let noop_keypair = Keypair::new(); - let bank = Arc::new(bank); - let bank_client = BankClient::new_shared(&bank); - let instruction = Instruction::new(program_id, &1u8, vec![]); - let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); - println!("result: {:?}", result); - assert!(result.is_ok()); + // Write the noop program into the same program account + let elf = read_bpf_program("solana_bpf_rust_noop"); + let message = Message::new( + &[system_instruction::create_account( + &mint_keypair.pubkey(), + &noop_keypair.pubkey(), + 1, + elf.len() as u64 * 2, + &bpf_loader::id(), + )], + Some(&mint_keypair.pubkey()), + ); + assert!(bank_client + .send_and_confirm_message(&[&mint_keypair, &noop_keypair], message) + .is_ok()); + write_bpf_program( + &bank_client, + &bpf_loader::id(), + &mint_keypair, + &noop_keypair, + &elf, + ); + + let account_metas = vec![ + AccountMeta::new(noop_keypair.pubkey(), true), + AccountMeta::new_readonly(bpf_loader::id(), false), + AccountMeta::new(rent::id(), false), + ]; + let instruction = Instruction::new_with_bytes(program_pubkey, &[], account_metas.clone()); + let message = 
Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let result = bank_client.send_and_confirm_message(&[&mint_keypair, &noop_keypair], message); + assert_eq!( + result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete) + ); } diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 537f9bf0e4..38b5024837 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-loader-program" -version = "1.5.19" +version = "1.6.14" description = "Solana BPF loader" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,14 +12,15 @@ edition = "2018" [dependencies] bincode = "1.3.1" byteorder = "1.3.4" -curve25519-dalek = "3" log = "0.4.11" num-derive = "0.3" num-traits = "0.2" rand_core = "0.6.2" -solana-runtime = { path = "../../runtime", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } -solana_rbpf = "=0.2.7" +sha3 = "0.9.1" +solana-measure = { path = "../../measure", version = "=1.6.14" } +solana-runtime = { path = "../../runtime", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } +solana_rbpf = "=0.2.9" thiserror = "1.0" [dev-dependencies] diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs new file mode 100644 index 0000000000..e99c953b49 --- /dev/null +++ b/programs/bpf_loader/benches/serialization.rs @@ -0,0 +1,141 @@ +#![feature(test)] + +extern crate test; + +use solana_bpf_loader_program::serialization::{ + serialize_parameters_aligned, serialize_parameters_unaligned, +}; +use solana_sdk::{ + account::{Account, AccountSharedData}, + bpf_loader, +}; +use solana_sdk::{keyed_account::KeyedAccount, pubkey::Pubkey}; +use std::cell::RefCell; +use test::Bencher; + +fn create_inputs() -> ( + Pubkey, + Vec, + Vec>, + Vec, +) { + let program_id = 
solana_sdk::pubkey::new_rand(); + let dup_key = solana_sdk::pubkey::new_rand(); + let dup_key2 = solana_sdk::pubkey::new_rand(); + let keys = vec![ + dup_key, + dup_key, + solana_sdk::pubkey::new_rand(), + solana_sdk::pubkey::new_rand(), + dup_key2, + dup_key2, + solana_sdk::pubkey::new_rand(), + solana_sdk::pubkey::new_rand(), + ]; + let accounts = vec![ + RefCell::new(AccountSharedData::from(Account { + lamports: 1, + data: vec![1u8, 2, 3, 4, 5], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + // dup + RefCell::new(AccountSharedData::from(Account { + lamports: 1, + data: vec![1u8; 100000], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 2, + data: vec![11u8; 100000], + owner: bpf_loader::id(), + executable: true, + rent_epoch: 200, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 3, + data: vec![], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 3100, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 4, + data: vec![1u8; 100000], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + // dup + RefCell::new(AccountSharedData::from(Account { + lamports: 4, + data: vec![1u8; 1000000], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 5, + data: vec![11u8; 10000], + owner: bpf_loader::id(), + executable: true, + rent_epoch: 200, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 6, + data: vec![], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 3100, + })), + ]; + + let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; + + (program_id, keys, accounts, instruction_data) +} + +#[bench] +fn bench_serialize_unaligned(bencher: &mut Bencher) { + let (program_id, keys, accounts, instruction_data) = create_inputs(); + let keyed_accounts: Vec<_> = keys + .iter() + 
.zip(&accounts) + .enumerate() + .map(|(i, (key, account))| { + if i <= accounts.len() / 2 { + KeyedAccount::new_readonly(&key, false, &account) + } else { + KeyedAccount::new(&key, false, &account) + } + }) + .collect(); + bencher.iter(|| { + let _ = serialize_parameters_unaligned(&program_id, &keyed_accounts, &instruction_data) + .unwrap(); + }); +} + +#[bench] +fn bench_serialize_aligned(bencher: &mut Bencher) { + let (program_id, keys, accounts, instruction_data) = create_inputs(); + let keyed_accounts: Vec<_> = keys + .iter() + .zip(&accounts) + .enumerate() + .map(|(i, (key, account))| { + if i <= accounts.len() / 2 { + KeyedAccount::new_readonly(&key, false, &account) + } else { + KeyedAccount::new(&key, false, &account) + } + }) + .collect(); + bencher.iter(|| { + let _ = + serialize_parameters_aligned(&program_id, &keyed_accounts, &instruction_data).unwrap(); + }); +} diff --git a/programs/bpf_loader/src/allocator_bump.rs b/programs/bpf_loader/src/allocator_bump.rs index 05d68ba912..ea3fee8fd1 100644 --- a/programs/bpf_loader/src/allocator_bump.rs +++ b/programs/bpf_loader/src/allocator_bump.rs @@ -1,18 +1,19 @@ use crate::alloc; use alloc::{Alloc, AllocErr}; +use solana_rbpf::aligned_memory::AlignedMemory; use std::alloc::Layout; #[derive(Debug)] pub struct BpfAllocator { - heap: Vec, + heap: AlignedMemory, start: u64, len: u64, pos: u64, } impl BpfAllocator { - pub fn new(heap: Vec, virtual_address: u64) -> Self { + pub fn new(heap: AlignedMemory, virtual_address: u64) -> Self { let len = heap.len() as u64; Self { heap, diff --git a/programs/bpf_loader/src/bpf_verifier.rs b/programs/bpf_loader/src/bpf_verifier.rs index 19b9cd6957..57372262f5 100644 --- a/programs/bpf_loader/src/bpf_verifier.rs +++ b/programs/bpf_loader/src/bpf_verifier.rs @@ -1,4 +1,5 @@ #![allow(clippy::upper_case_acronyms)] + use crate::BpfError; use solana_rbpf::ebpf; use thiserror::Error; @@ -59,14 +60,10 @@ fn adj_insn_ptr(insn_ptr: usize) -> usize { insn_ptr + 
ebpf::ELF_INSN_DUMP_OFFSET } -fn check_prog_len(prog: &[u8], is_program_size_cap: bool) -> Result<(), BpfError> { +fn check_prog_len(prog: &[u8]) -> Result<(), BpfError> { if prog.len() % ebpf::INSN_SIZE != 0 { return Err(VerifierError::ProgramLengthNotMultiple.into()); } - if is_program_size_cap && prog.len() > ebpf::PROG_MAX_SIZE { - return Err(VerifierError::ProgramTooLarge(prog.len() / ebpf::INSN_SIZE).into()); - } - if prog.is_empty() { return Err(VerifierError::NoProgram.into()); } @@ -150,8 +147,8 @@ fn check_imm_register(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Verifier } #[rustfmt::skip] -pub fn check(prog: &[u8], is_program_size_cap: bool) -> Result<(), BpfError> { - check_prog_len(prog, is_program_size_cap)?; +pub fn check(prog: &[u8]) -> Result<(), BpfError> { + check_prog_len(prog)?; let mut insn_ptr: usize = 0; while insn_ptr * ebpf::INSN_SIZE < prog.len() { diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 6d901a18ec..c19978baf8 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -15,23 +15,23 @@ use crate::{ syscalls::SyscallError, }; use log::{log_enabled, trace, Level::Trace}; +use solana_measure::measure::Measure; use solana_rbpf::{ - ebpf::MM_HEAP_START, + aligned_memory::AlignedMemory, + ebpf::{HOST_ALIGN, MM_HEAP_START}, error::{EbpfError, UserDefinedError}, memory_region::MemoryRegion, vm::{Config, EbpfVm, Executable, InstructionMeter}, }; use solana_runtime::message_processor::MessageProcessor; use solana_sdk::{ + account::{ReadableAccount, WritableAccount}, account_utils::State, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::Clock, entrypoint::SUCCESS, - feature_set::{ - bpf_compute_budget_balancing, matching_buffer_upgrade_authorities, - prevent_upgrade_and_invoke, upgradeable_close_instruction, - }, + feature_set::{skip_ro_deserialization, upgradeable_close_instruction}, ic_logger_msg, ic_msg, 
instruction::InstructionError, keyed_account::{from_keyed_account, next_keyed_account, KeyedAccount}, @@ -91,11 +91,8 @@ pub fn create_and_cache_executor( let (_, elf_bytes) = program .get_text_bytes() .map_err(|e| map_ebpf_error(invoke_context, e))?; - bpf_verifier::check( - elf_bytes, - !invoke_context.is_feature_active(&bpf_compute_budget_balancing::id()), - ) - .map_err(|e| map_ebpf_error(invoke_context, EbpfError::UserError(e)))?; + bpf_verifier::check(elf_bytes) + .map_err(|e| map_ebpf_error(invoke_context, EbpfError::UserError(e)))?; let syscall_registry = syscalls::register_syscalls(invoke_context).map_err(|e| { ic_msg!(invoke_context, "Failed to register syscalls: {}", e); InstructionError::ProgramEnvironmentSetupFailure @@ -150,8 +147,8 @@ pub fn create_vm<'a>( parameter_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, ) -> Result, EbpfError> { - let heap = vec![0_u8; DEFAULT_HEAP_SIZE]; - let heap_region = MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true); + let heap = AlignedMemory::new_with_size(DEFAULT_HEAP_SIZE, HOST_ALIGN); + let heap_region = MemoryRegion::new_from_slice(heap.as_slice(), MM_HEAP_START, 0, true); let mut vm = EbpfVm::new(program, parameter_bytes, &[heap_region])?; syscalls::bind_syscall_context_objects( loader_id, @@ -248,7 +245,7 @@ fn process_instruction_common( Some(executor) => executor, None => create_and_cache_executor( program_id, - &program.try_account_ref()?.data[offset..], + &program.try_account_ref()?.data()[offset..], invoke_context, use_jit, )?, @@ -307,27 +304,18 @@ fn process_loader_upgradeable_instruction( return Err(InstructionError::AccountAlreadyInitialized); } - if invoke_context.is_feature_active(&matching_buffer_upgrade_authorities::id()) { - let authority = next_keyed_account(account_iter)?; + let authority = next_keyed_account(account_iter)?; - buffer.set_state(&UpgradeableLoaderState::Buffer { - authority_address: Some(*authority.unsigned_key()), - })?; - } else { - let 
authority = next_keyed_account(account_iter) - .ok() - .map(|account| account.unsigned_key()); - buffer.set_state(&UpgradeableLoaderState::Buffer { - authority_address: authority.cloned(), - })?; - } + buffer.set_state(&UpgradeableLoaderState::Buffer { + authority_address: Some(*authority.unsigned_key()), + })?; } UpgradeableLoaderInstruction::Write { offset, bytes } => { let buffer = next_keyed_account(account_iter)?; let authority = next_keyed_account(account_iter)?; if let UpgradeableLoaderState::Buffer { authority_address } = buffer.state()? { - if authority_address == None { + if authority_address.is_none() { ic_logger_msg!(logger, "Buffer is immutable"); return Err(InstructionError::Immutable); // TODO better error code } @@ -344,7 +332,7 @@ fn process_loader_upgradeable_instruction( return Err(InstructionError::InvalidAccountData); } write_program_data( - &mut buffer.try_account_ref_mut()?.data, + buffer.try_account_ref_mut()?.data_as_mut_slice(), UpgradeableLoaderState::buffer_data_offset()? 
+ offset as usize, &bytes, invoke_context, @@ -358,19 +346,9 @@ fn process_loader_upgradeable_instruction( let rent = from_keyed_account::(next_keyed_account(account_iter)?)?; let clock = from_keyed_account::(next_keyed_account(account_iter)?)?; let system = next_keyed_account(account_iter)?; - let (upgrade_authority_address, upgrade_authority_signer) = - if invoke_context.is_feature_active(&matching_buffer_upgrade_authorities::id()) { - let authority = next_keyed_account(account_iter)?; - ( - Some(*authority.unsigned_key()), - authority.signer_key().is_none(), - ) - } else { - let authority = next_keyed_account(account_iter) - .ok() - .map(|account| account.unsigned_key()); - (authority.cloned(), false) - }; + let authority = next_keyed_account(account_iter)?; + let upgrade_authority_address = Some(*authority.unsigned_key()); + let upgrade_authority_signer = authority.signer_key().is_none(); // Verify Program account @@ -390,15 +368,13 @@ fn process_loader_upgradeable_instruction( // Verify Buffer account if let UpgradeableLoaderState::Buffer { authority_address } = buffer.state()? 
{ - if invoke_context.is_feature_active(&matching_buffer_upgrade_authorities::id()) { - if authority_address != upgrade_authority_address { - ic_logger_msg!(logger, "Buffer and upgrade authority don't match"); - return Err(InstructionError::IncorrectAuthority); - } - if upgrade_authority_signer { - ic_logger_msg!(logger, "Upgrade authority did not sign"); - return Err(InstructionError::MissingRequiredSignature); - } + if authority_address != upgrade_authority_address { + ic_logger_msg!(logger, "Buffer and upgrade authority don't match"); + return Err(InstructionError::IncorrectAuthority); + } + if upgrade_authority_signer { + ic_logger_msg!(logger, "Upgrade authority did not sign"); + return Err(InstructionError::MissingRequiredSignature); } } else { ic_logger_msg!(logger, "Invalid Buffer account"); @@ -450,7 +426,7 @@ fn process_loader_upgradeable_instruction( // Load and verify the program bits let _ = create_and_cache_executor( program_id, - &buffer.try_account_ref()?.data[buffer_data_offset..], + &buffer.try_account_ref()?.data()[buffer_data_offset..], invoke_context, use_jit, )?; @@ -460,9 +436,9 @@ fn process_loader_upgradeable_instruction( slot: clock.slot, upgrade_authority_address, })?; - programdata.try_account_ref_mut()?.data + programdata.try_account_ref_mut()?.data_as_mut_slice() [programdata_data_offset..programdata_data_offset + buffer_data_len] - .copy_from_slice(&buffer.try_account_ref()?.data[buffer_data_offset..]); + .copy_from_slice(&buffer.try_account_ref()?.data()[buffer_data_offset..]); // Update the Program account program.set_state(&UpgradeableLoaderState::Program { @@ -491,9 +467,7 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(logger, "Program account not executable"); return Err(InstructionError::AccountNotExecutable); } - if !program.is_writable() - && invoke_context.is_feature_active(&prevent_upgrade_and_invoke::id()) - { + if !program.is_writable() { ic_logger_msg!(logger, "Program account not writeable"); return 
Err(InstructionError::InvalidArgument); } @@ -517,15 +491,13 @@ fn process_loader_upgradeable_instruction( // Verify Buffer account if let UpgradeableLoaderState::Buffer { authority_address } = buffer.state()? { - if invoke_context.is_feature_active(&matching_buffer_upgrade_authorities::id()) { - if authority_address != Some(*authority.unsigned_key()) { - ic_logger_msg!(logger, "Buffer and upgrade authority don't match"); - return Err(InstructionError::IncorrectAuthority); - } - if authority.signer_key().is_none() { - ic_logger_msg!(logger, "Upgrade authority did not sign"); - return Err(InstructionError::MissingRequiredSignature); - } + if authority_address != Some(*authority.unsigned_key()) { + ic_logger_msg!(logger, "Buffer and upgrade authority don't match"); + return Err(InstructionError::IncorrectAuthority); + } + if authority.signer_key().is_none() { + ic_logger_msg!(logger, "Upgrade authority did not sign"); + return Err(InstructionError::MissingRequiredSignature); } } else { ic_logger_msg!(logger, "Invalid Buffer account"); @@ -559,7 +531,7 @@ fn process_loader_upgradeable_instruction( upgrade_authority_address, } = programdata.state()? 
{ - if upgrade_authority_address == None { + if upgrade_authority_address.is_none() { ic_logger_msg!(logger, "Program not upgradeable"); return Err(InstructionError::Immutable); } @@ -580,7 +552,7 @@ fn process_loader_upgradeable_instruction( let _ = create_and_cache_executor( program.unsigned_key(), - &buffer.try_account_ref()?.data[buffer_data_offset..], + &buffer.try_account_ref()?.data()[buffer_data_offset..], invoke_context, use_jit, )?; @@ -592,14 +564,12 @@ fn process_loader_upgradeable_instruction( slot: clock.slot, upgrade_authority_address: Some(*authority.unsigned_key()), })?; - programdata.try_account_ref_mut()?.data + programdata.try_account_ref_mut()?.data_as_mut_slice() [programdata_data_offset..programdata_data_offset + buffer_data_len] - .copy_from_slice(&buffer.try_account_ref()?.data[buffer_data_offset..]); - for i in &mut programdata.try_account_ref_mut()?.data + .copy_from_slice(&buffer.try_account_ref()?.data()[buffer_data_offset..]); + programdata.try_account_ref_mut()?.data_as_mut_slice() [programdata_data_offset + buffer_data_len..] - { - *i = 0 - } + .fill(0); // Fund ProgramData to rent-exemption, spill the rest @@ -620,13 +590,11 @@ fn process_loader_upgradeable_instruction( match account.state()? 
{ UpgradeableLoaderState::Buffer { authority_address } => { - if invoke_context.is_feature_active(&matching_buffer_upgrade_authorities::id()) - && new_authority == None - { + if new_authority.is_none() { ic_logger_msg!(logger, "Buffer authority is not optional"); return Err(InstructionError::IncorrectAuthority); } - if authority_address == None { + if authority_address.is_none() { ic_logger_msg!(logger, "Buffer is immutable"); return Err(InstructionError::Immutable); } @@ -646,7 +614,7 @@ fn process_loader_upgradeable_instruction( slot, upgrade_authority_address, } => { - if upgrade_authority_address == None { + if upgrade_authority_address.is_none() { ic_logger_msg!(logger, "Program not upgradeable"); return Err(InstructionError::Immutable); } @@ -700,9 +668,7 @@ fn process_loader_upgradeable_instruction( recipient_account.try_account_ref_mut()?.lamports += close_account.lamports()?; close_account.try_account_ref_mut()?.lamports = 0; - for i in &mut close_account.try_account_ref_mut()?.data { - *i = 0 - } + close_account.try_account_ref_mut()?.data.fill(0); } else { ic_logger_msg!(logger, "Account does not support closing"); return Err(InstructionError::InvalidArgument); @@ -739,7 +705,7 @@ fn process_loader_instruction( return Err(InstructionError::MissingRequiredSignature); } write_program_data( - &mut program.try_account_ref_mut()?.data, + &mut program.try_account_ref_mut()?.data_as_mut_slice(), offset as usize, &bytes, invoke_context, @@ -753,7 +719,7 @@ fn process_loader_instruction( let _ = create_and_cache_executor( program.unsigned_key(), - &program.try_account_ref()?.data, + &program.try_account_ref()?.data(), invoke_context, use_jit, )?; @@ -817,14 +783,18 @@ impl Executor for BpfExecutor { let mut keyed_accounts_iter = keyed_accounts.iter(); let _ = next_keyed_account(&mut keyed_accounts_iter)?; let parameter_accounts = keyed_accounts_iter.as_slice(); + let mut serialize_time = Measure::start("serialize"); let mut parameter_bytes = 
serialize_parameters(loader_id, program_id, parameter_accounts, &instruction_data)?; + serialize_time.stop(); + let mut create_vm_time = Measure::start("create_vm"); + let mut execute_time; { let compute_meter = invoke_context.get_compute_meter(); let mut vm = match create_vm( loader_id, self.program.as_ref(), - &mut parameter_bytes, + parameter_bytes.as_slice_mut(), ¶meter_accounts, invoke_context, ) { @@ -834,7 +804,9 @@ impl Executor for BpfExecutor { return Err(InstructionError::ProgramEnvironmentSetupFailure); } }; + create_vm_time.stop(); + execute_time = Measure::start("execute"); stable_log::program_invoke(&logger, program_id, invoke_depth); let mut instruction_meter = ThisInstructionMeter::new(compute_meter.clone()); let before = compute_meter.borrow().get_remaining(); @@ -880,8 +852,22 @@ impl Executor for BpfExecutor { return Err(error); } } + execute_time.stop(); } - deserialize_parameters(loader_id, parameter_accounts, ¶meter_bytes)?; + let mut deserialize_time = Measure::start("deserialize"); + deserialize_parameters( + loader_id, + parameter_accounts, + parameter_bytes.as_slice(), + invoke_context.is_feature_active(&skip_ro_deserialization::id()), + )?; + deserialize_time.stop(); + invoke_context.update_timing( + serialize_time.as_us(), + create_vm_time.as_us(), + execute_time.as_us(), + deserialize_time.as_us(), + ); stable_log::program_success(&logger, program_id); Ok(()) } @@ -891,13 +877,11 @@ impl Executor for BpfExecutor { mod tests { use super::*; use rand::Rng; - use solana_runtime::{ - bank::Bank, - bank_client::BankClient, - message_processor::{Executors, ThisInvokeContext}, - }; + use solana_runtime::{bank::Bank, bank_client::BankClient}; use solana_sdk::{ - account::{create_account_for_test, Account}, + account::{ + create_account_shared_data_for_test as create_account_for_test, AccountSharedData, + }, account_utils::StateMut, client::SyncClient, clock::Clock, @@ -906,7 +890,7 @@ mod tests { instruction::Instruction, 
instruction::{AccountMeta, InstructionError}, message::Message, - process_instruction::{BpfComputeBudget, MockInvokeContext}, + process_instruction::{MockComputeMeter, MockInvokeContext}, pubkey::Pubkey, rent::Rent, signature::{Keypair, Signer}, @@ -957,14 +941,14 @@ mod tests { let prog = &[ 0x18, 0x00, 0x00, 0x00, 0x88, 0x77, 0x66, 0x55, // first half of lddw ]; - bpf_verifier::check(prog, true).unwrap(); + bpf_verifier::check(prog).unwrap(); } #[test] fn test_bpf_loader_write() { let program_id = bpf_loader::id(); let program_key = solana_sdk::pubkey::new_rand(); - let program_account = Account::new_ref(1, 0, &program_id); + let program_account = AccountSharedData::new_ref(1, 0, &program_id); let keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; let instruction_data = bincode::serialize(&LoaderInstruction::Write { offset: 3, @@ -997,7 +981,7 @@ mod tests { // Case: Write bytes to an offset #[allow(unused_mut)] let mut keyed_accounts = vec![KeyedAccount::new(&program_key, true, &program_account)]; - keyed_accounts[0].account.borrow_mut().data = vec![0; 6]; + keyed_accounts[0].account.borrow_mut().set_data(vec![0; 6]); assert_eq!( Ok(()), process_instruction( @@ -1008,14 +992,14 @@ mod tests { ) ); assert_eq!( - vec![0, 0, 0, 1, 2, 3], - keyed_accounts[0].account.borrow().data + &vec![0, 0, 0, 1, 2, 3], + keyed_accounts[0].account.borrow().data() ); // Case: Overflow #[allow(unused_mut)] let mut keyed_accounts = vec![KeyedAccount::new(&program_key, true, &program_account)]; - keyed_accounts[0].account.borrow_mut().data = vec![0; 5]; + keyed_accounts[0].account.borrow_mut().set_data(vec![0; 5]); assert_eq!( Err(InstructionError::AccountDataTooSmall), process_instruction( @@ -1035,8 +1019,9 @@ mod tests { let mut elf = Vec::new(); let rent = Rent::default(); file.read_to_end(&mut elf).unwrap(); - let program_account = Account::new_ref(rent.minimum_balance(elf.len()), 0, &program_id); - program_account.borrow_mut().data = elf; + let 
program_account = + AccountSharedData::new_ref(rent.minimum_balance(elf.len()), 0, &program_id); + program_account.borrow_mut().set_data(elf); let keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; let instruction_data = bincode::serialize(&LoaderInstruction::Finalize).unwrap(); @@ -1078,7 +1063,7 @@ mod tests { program_account.borrow_mut().executable = false; // Un-finalize the account // Case: Finalize - program_account.borrow_mut().data[0] = 0; // bad elf + program_account.borrow_mut().data_as_mut_slice()[0] = 0; // bad elf let keyed_accounts = vec![KeyedAccount::new(&program_key, true, &program_account)]; assert_eq!( Err(InstructionError::InvalidAccountData), @@ -1100,8 +1085,8 @@ mod tests { let mut file = File::open("test_elfs/noop_aligned.so").expect("file open failed"); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); - let program_account = Account::new_ref(1, 0, &program_id); - program_account.borrow_mut().data = elf; + let program_account = AccountSharedData::new_ref(1, 0, &program_id); + program_account.borrow_mut().set_data(elf); program_account.borrow_mut().executable = true; let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; @@ -1129,7 +1114,7 @@ mod tests { keyed_accounts[0].account.borrow_mut().executable = true; // Case: With program and parameter account - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); keyed_accounts.push(KeyedAccount::new(&program_key, false, ¶meter_account)); assert_eq!( Ok(()), @@ -1138,32 +1123,11 @@ mod tests { // Case: limited budget let program_id = Pubkey::default(); - let mut invoke_context = ThisInvokeContext::new( - &program_id, - Rent::default(), - vec![], - &[], - &[], - None, - BpfComputeBudget { - max_units: 1, - log_units: 100, - log_64_units: 100, - create_program_address_units: 1500, - invoke_units: 1000, - max_invoke_depth: 2, - 
sha256_base_cost: 85, - sha256_byte_cost: 1, - max_call_depth: 20, - stack_frame_size: 4096, - log_pubkey_units: 100, - max_cpi_instruction_size: usize::MAX, - cpi_bytes_per_unit: 250, - }, - Rc::new(RefCell::new(Executors::default())), - None, - Arc::new(FeatureSet::default()), - ); + let mut invoke_context = MockInvokeContext { + key: program_id, + compute_meter: MockComputeMeter::default(), + ..MockInvokeContext::default() + }; assert_eq!( Err(InstructionError::ProgramFailedToComplete), process_instruction(&program_key, &keyed_accounts, &[], &mut invoke_context) @@ -1171,7 +1135,7 @@ mod tests { // Case: With duplicate accounts let duplicate_key = solana_sdk::pubkey::new_rand(); - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); @@ -1195,13 +1159,13 @@ mod tests { let mut file = File::open("test_elfs/noop_unaligned.so").expect("file open failed"); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); - let program_account = Account::new_ref(1, 0, &program_id); - program_account.borrow_mut().data = elf; + let program_account = AccountSharedData::new_ref(1, 0, &program_id); + program_account.borrow_mut().set_data(elf); program_account.borrow_mut().executable = true; let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; // Case: With program and parameter account - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); keyed_accounts.push(KeyedAccount::new(&program_key, false, ¶meter_account)); assert_eq!( Ok(()), @@ -1215,7 +1179,7 @@ mod tests { // Case: With duplicate accounts let duplicate_key = 
solana_sdk::pubkey::new_rand(); - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); @@ -1239,13 +1203,13 @@ mod tests { let mut file = File::open("test_elfs/noop_aligned.so").expect("file open failed"); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); - let program_account = Account::new_ref(1, 0, &program_id); - program_account.borrow_mut().data = elf; + let program_account = AccountSharedData::new_ref(1, 0, &program_id); + program_account.borrow_mut().set_data(elf); program_account.borrow_mut().executable = true; let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; // Case: With program and parameter account - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); keyed_accounts.push(KeyedAccount::new(&program_key, false, ¶meter_account)); assert_eq!( Ok(()), @@ -1259,7 +1223,7 @@ mod tests { // Case: With duplicate accounts let duplicate_key = solana_sdk::pubkey::new_rand(); - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); let mut keyed_accounts = vec![KeyedAccount::new(&program_key, false, &program_account)]; keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); keyed_accounts.push(KeyedAccount::new(&duplicate_key, false, ¶meter_account)); @@ -1279,13 +1243,13 @@ mod tests { let instruction = bincode::serialize(&UpgradeableLoaderInstruction::InitializeBuffer).unwrap(); let buffer_address = Pubkey::new_unique(); - let buffer_account = Account::new_ref( + let buffer_account = 
AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(9).unwrap(), &bpf_loader_upgradeable::id(), ); let authority_address = Pubkey::new_unique(); - let authority_account = Account::new_ref( + let authority_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(9).unwrap(), &bpf_loader_upgradeable::id(), @@ -1337,7 +1301,7 @@ mod tests { #[test] fn test_bpf_loader_upgradeable_write() { let buffer_address = Pubkey::new_unique(); - let buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(9).unwrap(), &bpf_loader_upgradeable::id(), @@ -1394,7 +1358,8 @@ mod tests { } ); assert_eq!( - &buffer_account.borrow().data[UpgradeableLoaderState::buffer_data_offset().unwrap()..], + &buffer_account.borrow().data() + [UpgradeableLoaderState::buffer_data_offset().unwrap()..], &[42; 9] ); @@ -1404,7 +1369,7 @@ mod tests { bytes: vec![42; 6], }) .unwrap(); - let buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(9).unwrap(), &bpf_loader_upgradeable::id(), @@ -1435,7 +1400,8 @@ mod tests { } ); assert_eq!( - &buffer_account.borrow().data[UpgradeableLoaderState::buffer_data_offset().unwrap()..], + &buffer_account.borrow().data() + [UpgradeableLoaderState::buffer_data_offset().unwrap()..], &[0, 0, 0, 42, 42, 42, 42, 42, 42] ); @@ -1596,7 +1562,7 @@ mod tests { UpgradeableLoaderState::programdata_len(elf.len()).unwrap(), ); let buffer_address = Pubkey::new_unique(); - let mut buffer_account = Account::new( + let mut buffer_account = AccountSharedData::new( min_programdata_balance, UpgradeableLoaderState::buffer_len(elf.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -1606,14 +1572,14 @@ mod tests { authority_address: Some(upgrade_authority_keypair.pubkey()), }) .unwrap(); - buffer_account.data[UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
+ buffer_account.data_as_mut_slice()[UpgradeableLoaderState::buffer_data_offset().unwrap()..] .copy_from_slice(&elf); - let program_account = Account::new( + let program_account = AccountSharedData::new( min_programdata_balance, UpgradeableLoaderState::program_len().unwrap(), &bpf_loader_upgradeable::id(), ); - let programdata_account = Account::new( + let programdata_account = AccountSharedData::new( 1, UpgradeableLoaderState::programdata_len(elf.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -1622,8 +1588,8 @@ mod tests { // Test successful deploy bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let before = bank.get_balance(&mint_keypair.pubkey()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( @@ -1653,7 +1619,7 @@ mod tests { assert_eq!(post_program_account.lamports, min_program_balance); assert_eq!(post_program_account.owner, bpf_loader_upgradeable::id()); assert_eq!( - post_program_account.data.len(), + post_program_account.data().len(), UpgradeableLoaderState::program_len().unwrap() ); let state: UpgradeableLoaderState = post_program_account.state().unwrap(); @@ -1674,7 +1640,7 @@ mod tests { upgrade_authority_address: Some(upgrade_authority_keypair.pubkey()) } ); - for (i, byte) in post_programdata_account.data + for (i, byte) in post_programdata_account.data() [UpgradeableLoaderState::programdata_data_offset().unwrap()..] 
.iter() .enumerate() @@ -1686,7 +1652,7 @@ mod tests { bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( bpf_loader_upgradeable::id(), &UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len: elf.len(), @@ -1715,7 +1681,7 @@ mod tests { // Test initialized ProgramData account bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -1745,7 +1711,7 @@ mod tests { bank.store_account(&program_keypair.pubkey(), &program_account); bank.store_account(&programdata_address, &programdata_account); let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( bpf_loader_upgradeable::id(), &UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len: elf.len(), @@ -1776,7 +1742,7 @@ mod tests { bank.store_account(&program_keypair.pubkey(), &program_account); bank.store_account(&programdata_address, &programdata_account); let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( bpf_loader_upgradeable::id(), &UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len: elf.len(), @@ -1804,9 +1770,9 @@ mod tests { // Test invalid Buffer account state bank.clear_signatures(); - bank.store_account(&buffer_address, &Account::default()); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&buffer_address, &AccountSharedData::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( 
&bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -1833,8 +1799,8 @@ mod tests { // Test program account not rent exempt bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -1861,8 +1827,8 @@ mod tests { // Test program account not rent exempt because data is larger than needed bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), &program_keypair.pubkey(), @@ -1894,8 +1860,8 @@ mod tests { // Test program account too small bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), &program_keypair.pubkey(), @@ -1928,11 +1894,11 @@ mod tests { bank.clear_signatures(); bank.store_account( &mint_keypair.pubkey(), - &Account::new(min_program_balance, 0, &system_program::id()), + 
&AccountSharedData::new(min_program_balance, 0, &system_program::id()), ); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -1957,14 +1923,14 @@ mod tests { ); bank.store_account( &mint_keypair.pubkey(), - &Account::new(1_000_000_000, 0, &system_program::id()), + &AccountSharedData::new(1_000_000_000, 0, &system_program::id()), ); // Test max_data_len bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -1992,13 +1958,13 @@ mod tests { bank.clear_signatures(); bank.store_account( &mint_keypair.pubkey(), - &Account::new(u64::MAX / 2, 0, &system_program::id()), + &AccountSharedData::new(u64::MAX / 2, 0, &system_program::id()), ); let mut modified_buffer_account = buffer_account.clone(); modified_buffer_account.lamports = u64::MAX / 2; bank.store_account(&buffer_address, &modified_buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( 
&bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -2025,8 +1991,8 @@ mod tests { // Test not the system account bank.clear_signatures(); bank.store_account(&buffer_address, &buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), &program_keypair.pubkey(), @@ -2056,8 +2022,8 @@ mod tests { .data .truncate(UpgradeableLoaderState::buffer_len(1).unwrap()); bank.store_account(&buffer_address, &modified_buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -2083,7 +2049,7 @@ mod tests { // Test small buffer account bank.clear_signatures(); - let mut modified_buffer_account = Account::new( + let mut modified_buffer_account = AccountSharedData::new( min_programdata_balance, UpgradeableLoaderState::buffer_len(elf.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -2093,12 +2059,13 @@ mod tests { authority_address: Some(upgrade_authority_keypair.pubkey()), }) .unwrap(); - modified_buffer_account.data[UpgradeableLoaderState::buffer_data_offset().unwrap()..] + modified_buffer_account.data_as_mut_slice() + [UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
.copy_from_slice(&elf); modified_buffer_account.data.truncate(5); bank.store_account(&buffer_address, &modified_buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -2124,7 +2091,7 @@ mod tests { // Mismatched buffer and program authority bank.clear_signatures(); - let mut modified_buffer_account = Account::new( + let mut modified_buffer_account = AccountSharedData::new( min_programdata_balance, UpgradeableLoaderState::buffer_len(elf.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -2134,11 +2101,12 @@ mod tests { authority_address: Some(buffer_address), }) .unwrap(); - modified_buffer_account.data[UpgradeableLoaderState::buffer_data_offset().unwrap()..] + modified_buffer_account.data_as_mut_slice() + [UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
.copy_from_slice(&elf); bank.store_account(&buffer_address, &modified_buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -2164,7 +2132,7 @@ mod tests { // Deploy buffer with mismatched None authority bank.clear_signatures(); - let mut modified_buffer_account = Account::new( + let mut modified_buffer_account = AccountSharedData::new( min_programdata_balance, UpgradeableLoaderState::buffer_len(elf.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -2174,11 +2142,12 @@ mod tests { authority_address: None, }) .unwrap(); - modified_buffer_account.data[UpgradeableLoaderState::buffer_data_offset().unwrap()..] + modified_buffer_account.data_as_mut_slice() + [UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
.copy_from_slice(&elf); bank.store_account(&buffer_address, &modified_buffer_account); - bank.store_account(&program_keypair.pubkey(), &Account::default()); - bank.store_account(&programdata_address, &Account::default()); + bank.store_account(&program_keypair.pubkey(), &AccountSharedData::default()); + bank.store_account(&programdata_address, &AccountSharedData::default()); let message = Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &mint_keypair.pubkey(), @@ -2228,10 +2197,12 @@ mod tests { let upgrade_authority_address = Pubkey::new_unique(); let buffer_address = Pubkey::new_unique(); let program_address = Pubkey::new_unique(); - let (programdata_address, _) = - Pubkey::find_program_address(&[program_address.as_ref()], &id()); + let (programdata_address, _) = Pubkey::find_program_address( + &[program_address.as_ref()], + &bpf_loader_upgradeable::id(), + ); let spill_address = Pubkey::new_unique(); - let upgrade_authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let upgrade_authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); #[allow(clippy::type_complexity)] fn get_accounts( @@ -2244,12 +2215,12 @@ mod tests { min_program_balance: u64, min_programdata_balance: u64, ) -> ( - Rc>, - Rc>, - Rc>, - Rc>, + Rc>, + Rc>, + Rc>, + Rc>, ) { - let buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(elf_new.len()).unwrap(), &bpf_loader_upgradeable::id(), @@ -2260,10 +2231,10 @@ mod tests { authority_address: Some(*buffer_authority), }) .unwrap(); - buffer_account.borrow_mut().data + buffer_account.borrow_mut().data_as_mut_slice() [UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
.copy_from_slice(&elf_new); - let programdata_account = Account::new_ref( + let programdata_account = AccountSharedData::new_ref( min_programdata_balance, UpgradeableLoaderState::programdata_len(elf_orig.len().max(elf_new.len())).unwrap(), &bpf_loader_upgradeable::id(), @@ -2275,7 +2246,7 @@ mod tests { upgrade_authority_address: Some(*upgrade_authority_address), }) .unwrap(); - let program_account = Account::new_ref( + let program_account = AccountSharedData::new_ref( min_program_balance, UpgradeableLoaderState::program_len().unwrap(), &bpf_loader_upgradeable::id(), @@ -2287,7 +2258,7 @@ mod tests { programdata_address: *programdata_address, }) .unwrap(); - let spill_account = Account::new_ref(0, 0, &Pubkey::new_unique()); + let spill_account = AccountSharedData::new_ref(0, 0, &Pubkey::new_unique()); ( buffer_account, @@ -2343,7 +2314,7 @@ mod tests { upgrade_authority_address: Some(upgrade_authority_address) } ); - for (i, byte) in programdata_account.borrow().data + for (i, byte) in programdata_account.borrow().data() [UpgradeableLoaderState::programdata_data_offset().unwrap() ..UpgradeableLoaderState::programdata_data_offset().unwrap() + elf_new.len()] .iter() @@ -2677,7 +2648,7 @@ mod tests { min_program_balance, min_programdata_balance, ); - let buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(elf_orig.len().max(elf_new.len()) + 1).unwrap(), &bpf_loader_upgradeable::id(), @@ -2874,13 +2845,15 @@ mod tests { let instruction = bincode::serialize(&UpgradeableLoaderInstruction::SetAuthority).unwrap(); let slot = 0; let upgrade_authority_address = Pubkey::new_unique(); - let upgrade_authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let upgrade_authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let new_upgrade_authority_address = Pubkey::new_unique(); - let new_upgrade_authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let 
new_upgrade_authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let program_address = Pubkey::new_unique(); - let (programdata_address, _) = - Pubkey::find_program_address(&[program_address.as_ref()], &id()); - let programdata_account = Account::new_ref( + let (programdata_address, _) = Pubkey::find_program_address( + &[program_address.as_ref()], + &bpf_loader_upgradeable::id(), + ); + let programdata_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::programdata_len(0).unwrap(), &bpf_loader_upgradeable::id(), @@ -3066,11 +3039,11 @@ mod tests { fn test_bpf_loader_upgradeable_set_buffer_authority() { let instruction = bincode::serialize(&UpgradeableLoaderInstruction::SetAuthority).unwrap(); let authority_address = Pubkey::new_unique(); - let authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let new_authority_address = Pubkey::new_unique(); - let new_authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let new_authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let buffer_address = Pubkey::new_unique(); - let buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(0).unwrap(), &bpf_loader_upgradeable::id(), @@ -3255,11 +3228,11 @@ mod tests { fn test_bpf_loader_upgradeable_close() { let instruction = bincode::serialize(&UpgradeableLoaderInstruction::Close).unwrap(); let authority_address = Pubkey::new_unique(); - let authority_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let authority_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let recipient_address = Pubkey::new_unique(); - let recipient_account = Account::new_ref(1, 0, &Pubkey::new_unique()); + let recipient_account = AccountSharedData::new_ref(1, 0, &Pubkey::new_unique()); let buffer_address = Pubkey::new_unique(); - let 
buffer_account = Account::new_ref( + let buffer_account = AccountSharedData::new_ref( 1, UpgradeableLoaderState::buffer_len(0).unwrap(), &bpf_loader_upgradeable::id(), @@ -3285,8 +3258,8 @@ mod tests { &mut MockInvokeContext::default() ) ); - assert_eq!(0, buffer_account.borrow().lamports); - assert_eq!(2, recipient_account.borrow().lamports); + assert_eq!(0, buffer_account.borrow().lamports()); + assert_eq!(2, recipient_account.borrow().lamports()); assert!(buffer_account.borrow().data.iter().all(|&value| value == 0)); // Case: close with wrong authority @@ -3374,11 +3347,11 @@ mod tests { 0..elf.len(), 0..255, |bytes: &mut [u8]| { - let program_account = Account::new_ref(1, 0, &program_id); - program_account.borrow_mut().data = bytes.to_vec(); + let program_account = AccountSharedData::new_ref(1, 0, &program_id); + program_account.borrow_mut().set_data(bytes.to_vec()); program_account.borrow_mut().executable = true; - let parameter_account = Account::new_ref(1, 0, &program_id); + let parameter_account = AccountSharedData::new_ref(1, 0, &program_id); let keyed_accounts = vec![ KeyedAccount::new(&program_key, false, &program_account), KeyedAccount::new(&program_key, false, ¶meter_account), diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 391a24015c..3856101ef1 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -1,7 +1,12 @@ use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; +use solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}; use solana_sdk::{ - bpf_loader_deprecated, entrypoint::MAX_PERMITTED_DATA_INCREASE, instruction::InstructionError, - keyed_account::KeyedAccount, pubkey::Pubkey, + account::{ReadableAccount, WritableAccount}, + bpf_loader_deprecated, + entrypoint::MAX_PERMITTED_DATA_INCREASE, + instruction::InstructionError, + keyed_account::KeyedAccount, + pubkey::Pubkey, }; use std::{ io::prelude::*, @@ -23,7 +28,7 @@ pub fn 
serialize_parameters( program_id: &Pubkey, keyed_accounts: &[KeyedAccount], data: &[u8], -) -> Result, InstructionError> { +) -> Result { if *loader_id == bpf_loader_deprecated::id() { serialize_parameters_unaligned(program_id, keyed_accounts, data) } else { @@ -35,173 +40,220 @@ pub fn deserialize_parameters( loader_id: &Pubkey, keyed_accounts: &[KeyedAccount], buffer: &[u8], + skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { if *loader_id == bpf_loader_deprecated::id() { - deserialize_parameters_unaligned(keyed_accounts, buffer) + deserialize_parameters_unaligned(keyed_accounts, buffer, skip_ro_deserialization) } else { - deserialize_parameters_aligned(keyed_accounts, buffer) + deserialize_parameters_aligned(keyed_accounts, buffer, skip_ro_deserialization) } } +pub fn get_serialized_account_size_unaligned( + keyed_account: &KeyedAccount, +) -> Result { + let data_len = keyed_account.data_len()?; + Ok( + size_of::() // is_signer + + size_of::() // is_writable + + size_of::() // key + + size_of::() // lamports + + size_of::() // data len + + data_len // data + + size_of::() // owner + + size_of::() // executable + + size_of::(), // rent_epoch + ) +} + pub fn serialize_parameters_unaligned( program_id: &Pubkey, keyed_accounts: &[KeyedAccount], instruction_data: &[u8], -) -> Result, InstructionError> { +) -> Result { // Calculate size in order to alloc once let mut size = size_of::(); for (i, keyed_account) in keyed_accounts.iter().enumerate() { let (is_dup, _) = is_dup(&keyed_accounts[..i], keyed_account); - size += 1; // dup, signer, writable, executable + size += 1; // dup if !is_dup { - let data_len = keyed_account.data_len()?; - size += size_of::() // key - + size_of::() // owner - + size_of::() // lamports - + size_of::() // data len - + data_len - + MAX_PERMITTED_DATA_INCREASE - + (data_len as *const u8).align_offset(align_of::()) - + size_of::(); // rent epoch; + size += get_serialized_account_size_unaligned(keyed_account)?; } } - size += 
size_of::() // data len - + instruction_data.len() - + size_of::(); // program id; - let mut v: Vec = Vec::with_capacity(size); + size += size_of::() // instruction data len + + instruction_data.len() // instruction data + + size_of::(); // program id + let mut v = AlignedMemory::new(size, HOST_ALIGN); v.write_u64::(keyed_accounts.len() as u64) - .unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; for (i, keyed_account) in keyed_accounts.iter().enumerate() { let (is_dup, position) = is_dup(&keyed_accounts[..i], keyed_account); if is_dup { - v.write_u8(position as u8).unwrap(); + v.write_u8(position as u8) + .map_err(|_| InstructionError::InvalidArgument)?; } else { - v.write_u8(std::u8::MAX).unwrap(); + v.write_u8(std::u8::MAX) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u8(keyed_account.signer_key().is_some() as u8) - .unwrap(); - v.write_u8(keyed_account.is_writable() as u8).unwrap(); - v.write_all(keyed_account.unsigned_key().as_ref()).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_u8(keyed_account.is_writable() as u8) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(keyed_account.unsigned_key().as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.lamports()?) - .unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) - .unwrap(); - v.write_all(&keyed_account.try_account_ref()?.data).unwrap(); - v.write_all(keyed_account.owner()?.as_ref()).unwrap(); - v.write_u8(keyed_account.executable()? as u8).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(&keyed_account.try_account_ref()?.data()) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(keyed_account.owner()?.as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_u8(keyed_account.executable()? as u8) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.rent_epoch()? 
as u64) - .unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; } } v.write_u64::(instruction_data.len() as u64) - .unwrap(); - v.write_all(instruction_data).unwrap(); - v.write_all(program_id.as_ref()).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(instruction_data) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(program_id.as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; Ok(v) } pub fn deserialize_parameters_unaligned( keyed_accounts: &[KeyedAccount], buffer: &[u8], + skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (i, keyed_account) in keyed_accounts.iter().enumerate() { let (is_dup, _) = is_dup(&keyed_accounts[..i], keyed_account); start += 1; // is_dup if !is_dup { - start += size_of::(); // is_signer - start += size_of::(); // is_writable - start += size_of::(); // pubkey - keyed_account.try_account_ref_mut()?.lamports = - LittleEndian::read_u64(&buffer[start..]); - start += size_of::() // lamports + if keyed_account.is_writable() || !skip_ro_deserialization { + start += size_of::(); // is_signer + start += size_of::(); // is_writable + start += size_of::(); // key + keyed_account.try_account_ref_mut()?.lamports = + LittleEndian::read_u64(&buffer[start..]); + start += size_of::() // lamports + size_of::(); // data length - let end = start + keyed_account.data_len()?; - keyed_account - .try_account_ref_mut()? - .data - .clone_from_slice(&buffer[start..end]); - start += keyed_account.data_len()? // data + let end = start + keyed_account.data_len()?; + keyed_account + .try_account_ref_mut()? + .data_as_mut_slice() + .clone_from_slice(&buffer[start..end]); + start += keyed_account.data_len()? 
// data + size_of::() // owner + size_of::() // executable + size_of::(); // rent_epoch + } else { + start += get_serialized_account_size_unaligned(keyed_account)?; + } } } Ok(()) } +pub fn get_serialized_account_size_aligned( + keyed_account: &KeyedAccount, +) -> Result { + let data_len = keyed_account.data_len()?; + Ok( + size_of::() // is_signer + + size_of::() // is_writable + + size_of::() // executable + + 4 // padding to 128-bit aligned + + size_of::() // key + + size_of::() // owner + + size_of::() // lamports + + size_of::() // data len + + data_len + + MAX_PERMITTED_DATA_INCREASE + + (data_len as *const u8).align_offset(align_of::()) + + size_of::(), // rent epoch + ) +} + pub fn serialize_parameters_aligned( program_id: &Pubkey, keyed_accounts: &[KeyedAccount], instruction_data: &[u8], -) -> Result, InstructionError> { +) -> Result { // Calculate size in order to alloc once let mut size = size_of::(); for (i, keyed_account) in keyed_accounts.iter().enumerate() { let (is_dup, _) = is_dup(&keyed_accounts[..i], keyed_account); - size += 8; // dup, signer, writable, executable - if !is_dup { - let data_len = keyed_account.data_len()?; - size += size_of::() // key - + size_of::() // owner - + size_of::() // lamports - + size_of::() // data len - + data_len - + MAX_PERMITTED_DATA_INCREASE - + (data_len as *const u8).align_offset(align_of::()) - + size_of::(); // rent epoch; + size += 1; // dup + if is_dup { + size += 7; // padding to 64-bit aligned + } else { + size += get_serialized_account_size_aligned(keyed_account)?; } } size += size_of::() // data len + instruction_data.len() + size_of::(); // program id; - let mut v: Vec = Vec::with_capacity(size); + let mut v = AlignedMemory::new(size, HOST_ALIGN); // Serialize into the buffer v.write_u64::(keyed_accounts.len() as u64) - .unwrap(); - if v.as_ptr().align_offset(align_of::()) != 0 { - panic!(); - } + .map_err(|_| InstructionError::InvalidArgument)?; for (i, keyed_account) in 
keyed_accounts.iter().enumerate() { let (is_dup, position) = is_dup(&keyed_accounts[..i], keyed_account); if is_dup { - v.write_u8(position as u8).unwrap(); - v.write_all(&[0u8, 0, 0, 0, 0, 0, 0]).unwrap(); // 7 bytes of padding to make 64-bit aligned + v.write_u8(position as u8) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(&[0u8, 0, 0, 0, 0, 0, 0]) + .map_err(|_| InstructionError::InvalidArgument)?; // 7 bytes of padding to make 64-bit aligned } else { - v.write_u8(std::u8::MAX).unwrap(); + v.write_u8(std::u8::MAX) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u8(keyed_account.signer_key().is_some() as u8) - .unwrap(); - v.write_u8(keyed_account.is_writable() as u8).unwrap(); - v.write_u8(keyed_account.executable()? as u8).unwrap(); - v.write_all(&[0u8, 0, 0, 0]).unwrap(); // 4 bytes of padding to make 128-bit aligned - v.write_all(keyed_account.unsigned_key().as_ref()).unwrap(); - v.write_all(keyed_account.owner()?.as_ref()).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_u8(keyed_account.is_writable() as u8) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_u8(keyed_account.executable()? as u8) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(&[0u8, 0, 0, 0]) + .map_err(|_| InstructionError::InvalidArgument)?; // 4 bytes of padding to make 128-bit aligned + v.write_all(keyed_account.unsigned_key().as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(keyed_account.owner()?.as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.lamports()?) - .unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? 
as u64) - .unwrap(); - v.write_all(&keyed_account.try_account_ref()?.data).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(&keyed_account.try_account_ref()?.data()) + .map_err(|_| InstructionError::InvalidArgument)?; v.resize( - v.len() - + MAX_PERMITTED_DATA_INCREASE - + (v.len() as *const u8).align_offset(align_of::()), + MAX_PERMITTED_DATA_INCREASE + + (v.write_index() as *const u8).align_offset(align_of::()), 0, - ); + ) + .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.rent_epoch()? as u64) - .unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; } } v.write_u64::(instruction_data.len() as u64) - .unwrap(); - v.write_all(instruction_data).unwrap(); - v.write_all(program_id.as_ref()).unwrap(); + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(instruction_data) + .map_err(|_| InstructionError::InvalidArgument)?; + v.write_all(program_id.as_ref()) + .map_err(|_| InstructionError::InvalidArgument)?; Ok(v) } pub fn deserialize_parameters_aligned( keyed_accounts: &[KeyedAccount], buffer: &[u8], + skip_ro_deserialization: bool, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (i, keyed_account) in keyed_accounts.iter().enumerate() { @@ -209,7 +261,7 @@ pub fn deserialize_parameters_aligned( start += size_of::(); // position if is_dup { start += 7; // padding to 64-bit aligned - } else { + } else if keyed_account.is_writable() || !skip_ro_deserialization { let mut account = keyed_account.try_account_ref_mut()?; start += size_of::() // is_signer + size_of::() // is_writable @@ -220,7 +272,7 @@ pub fn deserialize_parameters_aligned( start += size_of::(); // owner account.lamports = LittleEndian::read_u64(&buffer[start..]); start += size_of::(); // lamports - let pre_len = account.data.len(); + let pre_len = account.data_as_mut_slice().len(); let post_len = LittleEndian::read_u64(&buffer[start..]) as usize; start += size_of::(); // data length let 
mut data_end = start + pre_len; @@ -230,10 +282,14 @@ pub fn deserialize_parameters_aligned( account.data.resize(post_len, 0); data_end = start + post_len; } - account.data.clone_from_slice(&buffer[start..data_end]); + account + .data_as_mut_slice() + .clone_from_slice(&buffer[start..data_end]); start += pre_len + MAX_PERMITTED_DATA_INCREASE; // data start += (start as *const u8).align_offset(align_of::()); start += size_of::(); // rent_epoch + } else { + start += get_serialized_account_size_aligned(keyed_account)?; } } Ok(()) @@ -243,7 +299,10 @@ pub fn deserialize_parameters_aligned( mod tests { use super::*; use solana_sdk::{ - account::Account, account_info::AccountInfo, bpf_loader, entrypoint::deserialize, + account::{Account, AccountSharedData}, + account_info::AccountInfo, + bpf_loader, + entrypoint::deserialize, }; use std::{ cell::RefCell, @@ -256,48 +315,89 @@ mod tests { fn test_serialize_parameters() { let program_id = solana_sdk::pubkey::new_rand(); let dup_key = solana_sdk::pubkey::new_rand(); + let dup_key2 = solana_sdk::pubkey::new_rand(); let keys = vec![ dup_key, dup_key, solana_sdk::pubkey::new_rand(), solana_sdk::pubkey::new_rand(), + dup_key2, + dup_key2, + solana_sdk::pubkey::new_rand(), + solana_sdk::pubkey::new_rand(), ]; let accounts = [ - RefCell::new(Account { + RefCell::new(AccountSharedData::from(Account { lamports: 1, data: vec![1u8, 2, 3, 4, 5], owner: bpf_loader::id(), executable: false, rent_epoch: 100, - }), - // dup of first - RefCell::new(Account { + })), + // dup + RefCell::new(AccountSharedData::from(Account { lamports: 1, data: vec![1u8, 2, 3, 4, 5], owner: bpf_loader::id(), executable: false, rent_epoch: 100, - }), - RefCell::new(Account { + })), + RefCell::new(AccountSharedData::from(Account { lamports: 2, data: vec![11u8, 12, 13, 14, 15, 16, 17, 18, 19], owner: bpf_loader::id(), executable: true, rent_epoch: 200, - }), - RefCell::new(Account { + })), + RefCell::new(AccountSharedData::from(Account { lamports: 3, data: 
vec![], owner: bpf_loader::id(), executable: false, rent_epoch: 3100, - }), + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 4, + data: vec![1u8, 2, 3, 4, 5], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + // dup + RefCell::new(AccountSharedData::from(Account { + lamports: 4, + data: vec![1u8, 2, 3, 4, 5], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 100, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 5, + data: vec![11u8, 12, 13, 14, 15, 16, 17, 18, 19], + owner: bpf_loader::id(), + executable: true, + rent_epoch: 200, + })), + RefCell::new(AccountSharedData::from(Account { + lamports: 6, + data: vec![], + owner: bpf_loader::id(), + executable: false, + rent_epoch: 3100, + })), ]; let keyed_accounts: Vec<_> = keys .iter() .zip(&accounts) - .map(|(key, account)| KeyedAccount::new(&key, false, &account)) + .enumerate() + .map(|(i, (key, account))| { + if i <= accounts.len() / 2 { + KeyedAccount::new_readonly(&key, false, &account) + } else { + KeyedAccount::new(&key, false, &account) + } + }) .collect(); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; @@ -310,8 +410,9 @@ mod tests { &instruction_data, ) .unwrap(); + let (de_program_id, de_accounts, de_instruction_data) = - unsafe { deserialize(&mut serialized[0] as *mut u8) }; + unsafe { deserialize(&mut serialized.as_slice_mut()[0] as *mut u8) }; assert_eq!(&program_id, de_program_id); assert_eq!(instruction_data, de_instruction_data); @@ -323,7 +424,7 @@ mod tests { assert_eq!(key, *account_info.key); let account = account.borrow(); assert_eq!(account.lamports, account_info.lamports()); - assert_eq!(&account.data[..], &account_info.data.borrow()[..]); + assert_eq!(&account.data()[..], &account_info.data.borrow()[..]); assert_eq!(&account.owner, account_info.owner); assert_eq!(account.executable, account_info.executable); assert_eq!(account.rent_epoch, account_info.rent_epoch); @@ -342,6 +443,41 @@ mod tests { ); 
} + let de_accounts = accounts.clone(); + let de_keyed_accounts: Vec<_> = keys + .iter() + .zip(&de_accounts) + .enumerate() + .map(|(i, (key, account))| { + if i <= accounts.len() / 2 { + KeyedAccount::new_readonly(&key, false, &account) + } else { + KeyedAccount::new(&key, false, &account) + } + }) + .collect(); + deserialize_parameters( + &bpf_loader::id(), + &de_keyed_accounts, + serialized.as_slice(), + true, + ) + .unwrap(); + for ((account, de_keyed_account), key) in + accounts.iter().zip(de_keyed_accounts).zip(keys.clone()) + { + assert_eq!(key, *de_keyed_account.unsigned_key()); + let account = account.borrow(); + assert_eq!(account.lamports, de_keyed_account.lamports().unwrap()); + assert_eq!( + &account.data()[..], + &de_keyed_account.try_account_ref().unwrap().data[..] + ); + assert_eq!(account.owner, de_keyed_account.owner().unwrap()); + assert_eq!(account.executable, de_keyed_account.executable().unwrap()); + assert_eq!(account.rent_epoch, de_keyed_account.rent_epoch().unwrap()); + } + // check serialize_parameters_unaligned let mut serialized = serialize_parameters( @@ -351,20 +487,55 @@ mod tests { &instruction_data, ) .unwrap(); - let (de_program_id, de_accounts, de_instruction_data) = - unsafe { deserialize_unaligned(&mut serialized[0] as *mut u8) }; + let (de_program_id, de_accounts, de_instruction_data) = + unsafe { deserialize_unaligned(&mut serialized.as_slice_mut()[0] as *mut u8) }; assert_eq!(&program_id, de_program_id); assert_eq!(instruction_data, de_instruction_data); - for ((account, account_info), key) in accounts.iter().zip(de_accounts).zip(keys) { + for ((account, account_info), key) in accounts.iter().zip(de_accounts).zip(keys.clone()) { assert_eq!(key, *account_info.key); let account = account.borrow(); assert_eq!(account.lamports, account_info.lamports()); - assert_eq!(&account.data[..], &account_info.data.borrow()[..]); + assert_eq!(&account.data()[..], &account_info.data.borrow()[..]); assert_eq!(&account.owner, 
account_info.owner); assert_eq!(account.executable, account_info.executable); assert_eq!(account.rent_epoch, account_info.rent_epoch); } + + let de_accounts = accounts.clone(); + let de_keyed_accounts: Vec<_> = keys + .iter() + .zip(&de_accounts) + .enumerate() + .map(|(i, (key, account))| { + if i < accounts.len() / 2 { + KeyedAccount::new_readonly(&key, false, &account) + } else { + KeyedAccount::new(&key, false, &account) + } + }) + .collect(); + deserialize_parameters( + &bpf_loader_deprecated::id(), + &de_keyed_accounts, + serialized.as_slice(), + true, + ) + .unwrap(); + for ((account, de_keyed_account), key) in + accounts.iter().zip(de_keyed_accounts).zip(keys.clone()) + { + assert_eq!(key, *de_keyed_account.unsigned_key()); + let account = account.borrow(); + assert_eq!(account.lamports, de_keyed_account.lamports().unwrap()); + assert_eq!( + &account.data()[..], + &de_keyed_account.try_account_ref().unwrap().data[..] + ); + assert_eq!(account.owner, de_keyed_account.owner().unwrap()); + assert_eq!(account.executable, de_keyed_account.executable().unwrap()); + assert_eq!(account.rent_epoch, de_keyed_account.rent_epoch().unwrap()); + } } // the old bpf_loader in-program deserializer bpf_loader::id() diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 5520db8329..13e7cd543d 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -1,7 +1,7 @@ use crate::{alloc, BpfError}; use alloc::Alloc; -use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar}; use solana_rbpf::{ + aligned_memory::AlignedMemory, ebpf::MM_HEAP_START, error::EbpfError, memory_region::{AccessType, MemoryMapping}, @@ -10,31 +10,33 @@ use solana_rbpf::{ }; use solana_runtime::message_processor::MessageProcessor; use solana_sdk::{ - account::Account, + account::{Account, AccountSharedData, ReadableAccount}, account_info::AccountInfo, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, 
bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + clock::Clock, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, + epoch_schedule::EpochSchedule, feature_set::{ - abort_on_all_cpi_failures, cpi_data_cost, limit_cpi_loader_invoke, per_byte_logging_cost, - pubkey_log_syscall_enabled, ristretto_mul_syscall_enabled, sha256_syscall_enabled, - sol_log_compute_units_syscall, try_find_program_address_syscall_enabled, - use_loaded_executables, use_loaded_program_accounts, + cpi_data_cost, cpi_share_ro_and_exec_accounts, demote_sysvar_write_locks, + enforce_aligned_host_addrs, keccak256_syscall_enabled, + set_upgrade_authority_via_cpi_enabled, sysvar_via_syscall, update_data_on_realloc, }, hash::{Hasher, HASH_BYTES}, ic_msg, instruction::{AccountMeta, Instruction, InstructionError}, + keccak, keyed_account::KeyedAccount, native_loader, - process_instruction::{stable_log, ComputeMeter, InvokeContext, Logger}, - program_error::ProgramError, + process_instruction::{self, stable_log, ComputeMeter, InvokeContext, Logger}, pubkey::{Pubkey, PubkeyError, MAX_SEEDS}, + rent::Rent, + sysvar::{self, fees::Fees, Sysvar, SysvarId}, }; use std::{ alloc::Layout, cell::{Ref, RefCell, RefMut}, - convert::TryFrom, mem::{align_of, size_of}, rc::Rc, slice::from_raw_parts_mut, @@ -110,34 +112,39 @@ pub fn register_syscalls( syscall_registry.register_syscall_by_name(b"sol_log_", SyscallLog::call)?; syscall_registry.register_syscall_by_name(b"sol_log_64_", SyscallLogU64::call)?; - if invoke_context.is_feature_active(&sol_log_compute_units_syscall::id()) { - syscall_registry - .register_syscall_by_name(b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; - } - - if invoke_context.is_feature_active(&pubkey_log_syscall_enabled::id()) { - syscall_registry.register_syscall_by_name(b"sol_log_pubkey", SyscallLogPubkey::call)?; - } + syscall_registry + .register_syscall_by_name(b"sol_log_compute_units_", SyscallLogBpfComputeUnits::call)?; - if 
invoke_context.is_feature_active(&sha256_syscall_enabled::id()) { - syscall_registry.register_syscall_by_name(b"sol_sha256", SyscallSha256::call)?; - } - - if invoke_context.is_feature_active(&ristretto_mul_syscall_enabled::id()) { - syscall_registry - .register_syscall_by_name(b"sol_ristretto_mul", SyscallRistrettoMul::call)?; - } + syscall_registry.register_syscall_by_name(b"sol_log_pubkey", SyscallLogPubkey::call)?; syscall_registry.register_syscall_by_name( b"sol_create_program_address", SyscallCreateProgramAddress::call, )?; - if invoke_context.is_feature_active(&try_find_program_address_syscall_enabled::id()) { + syscall_registry.register_syscall_by_name( + b"sol_try_find_program_address", + SyscallTryFindProgramAddress::call, + )?; + + syscall_registry.register_syscall_by_name(b"sol_sha256", SyscallSha256::call)?; + + if invoke_context.is_feature_active(&keccak256_syscall_enabled::id()) { + syscall_registry.register_syscall_by_name(b"sol_keccak256", SyscallKeccak256::call)?; + } + + if invoke_context.is_feature_active(&sysvar_via_syscall::id()) { + syscall_registry + .register_syscall_by_name(b"sol_get_clock_sysvar", SyscallGetClockSysvar::call)?; syscall_registry.register_syscall_by_name( - b"sol_try_find_program_address", - SyscallTryFindProgramAddress::call, + b"sol_get_epoch_schedule_sysvar", + SyscallGetEpochScheduleSysvar::call, )?; + syscall_registry + .register_syscall_by_name(b"sol_get_fees_sysvar", SyscallGetFeesSysvar::call)?; + syscall_registry + .register_syscall_by_name(b"sol_get_rent_sysvar", SyscallGetRentSysvar::call)?; } + syscall_registry .register_syscall_by_name(b"sol_invoke_signed_c", SyscallInvokeSignedC::call)?; syscall_registry @@ -148,8 +155,8 @@ pub fn register_syscalls( } macro_rules! bind_feature_gated_syscall_context_object { - ($vm:expr, $invoke_context:expr, $feature_id:expr, $syscall_context_object:expr $(,)?) 
=> { - if $invoke_context.is_feature_active($feature_id) { + ($vm:expr, $is_feature_active:expr, $syscall_context_object:expr $(,)?) => { + if $is_feature_active { match $vm.bind_syscall_context_object($syscall_context_object, None) { Err(EbpfError::SyscallNotRegistered(_)) | Ok(()) => {} Err(err) => { @@ -165,31 +172,29 @@ pub fn bind_syscall_context_objects<'a>( vm: &mut EbpfVm<'a, BpfError, crate::ThisInstructionMeter>, callers_keyed_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, - heap: Vec, + heap: AlignedMemory, ) -> Result<(), EbpfError> { let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); + let enforce_aligned_host_addrs = + invoke_context.is_feature_active(&enforce_aligned_host_addrs::id()); // Syscall functions common across languages vm.bind_syscall_context_object(Box::new(SyscallAbort {}), None)?; vm.bind_syscall_context_object( Box::new(SyscallPanic { - compute_meter: if invoke_context.is_feature_active(&per_byte_logging_cost::id()) { - Some(invoke_context.get_compute_meter()) - } else { - None - }, + compute_meter: invoke_context.get_compute_meter(), loader_id, + enforce_aligned_host_addrs, }), None, )?; vm.bind_syscall_context_object( Box::new(SyscallLog { - per_byte_cost: invoke_context.is_feature_active(&per_byte_logging_cost::id()), - cost: bpf_compute_budget.log_units, compute_meter: invoke_context.get_compute_meter(), logger: invoke_context.get_logger(), loader_id, + enforce_aligned_host_addrs, }), None, )?; @@ -202,75 +207,106 @@ pub fn bind_syscall_context_objects<'a>( None, )?; - bind_feature_gated_syscall_context_object!( - vm, - invoke_context, - &sol_log_compute_units_syscall::id(), + vm.bind_syscall_context_object( Box::new(SyscallLogBpfComputeUnits { cost: 0, compute_meter: invoke_context.get_compute_meter(), logger: invoke_context.get_logger(), }), - ); + None, + )?; - bind_feature_gated_syscall_context_object!( - vm, - invoke_context, - &pubkey_log_syscall_enabled::id(), + 
vm.bind_syscall_context_object( Box::new(SyscallLogPubkey { cost: bpf_compute_budget.log_pubkey_units, compute_meter: invoke_context.get_compute_meter(), logger: invoke_context.get_logger(), loader_id, + enforce_aligned_host_addrs, }), - ); + None, + )?; - bind_feature_gated_syscall_context_object!( - vm, - invoke_context, - &sha256_syscall_enabled::id(), - Box::new(SyscallSha256 { - sha256_base_cost: bpf_compute_budget.sha256_base_cost, - sha256_byte_cost: bpf_compute_budget.sha256_byte_cost, + vm.bind_syscall_context_object( + Box::new(SyscallCreateProgramAddress { + cost: bpf_compute_budget.create_program_address_units, compute_meter: invoke_context.get_compute_meter(), loader_id, + enforce_aligned_host_addrs, }), - ); + None, + )?; - bind_feature_gated_syscall_context_object!( - vm, - invoke_context, - &ristretto_mul_syscall_enabled::id(), - Box::new(SyscallRistrettoMul { - cost: 0, + vm.bind_syscall_context_object( + Box::new(SyscallTryFindProgramAddress { + cost: bpf_compute_budget.create_program_address_units, compute_meter: invoke_context.get_compute_meter(), loader_id, + enforce_aligned_host_addrs, }), - ); + None, + )?; vm.bind_syscall_context_object( - Box::new(SyscallCreateProgramAddress { - cost: bpf_compute_budget.create_program_address_units, + Box::new(SyscallSha256 { + sha256_base_cost: bpf_compute_budget.sha256_base_cost, + sha256_byte_cost: bpf_compute_budget.sha256_byte_cost, compute_meter: invoke_context.get_compute_meter(), loader_id, + enforce_aligned_host_addrs, }), None, )?; bind_feature_gated_syscall_context_object!( vm, - invoke_context, - &try_find_program_address_syscall_enabled::id(), - Box::new(SyscallTryFindProgramAddress { - cost: bpf_compute_budget.create_program_address_units, + invoke_context.is_feature_active(&keccak256_syscall_enabled::id()), + Box::new(SyscallKeccak256 { + base_cost: bpf_compute_budget.sha256_base_cost, + byte_cost: bpf_compute_budget.sha256_byte_cost, compute_meter: invoke_context.get_compute_meter(), 
loader_id, }), ); - // Cross-program invocation syscalls + let is_sysvar_via_syscall_active = invoke_context.is_feature_active(&sysvar_via_syscall::id()); let invoke_context = Rc::new(RefCell::new(invoke_context)); + + bind_feature_gated_syscall_context_object!( + vm, + is_sysvar_via_syscall_active, + Box::new(SyscallGetClockSysvar { + invoke_context: invoke_context.clone(), + loader_id, + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + is_sysvar_via_syscall_active, + Box::new(SyscallGetEpochScheduleSysvar { + invoke_context: invoke_context.clone(), + loader_id, + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + is_sysvar_via_syscall_active, + Box::new(SyscallGetFeesSysvar { + invoke_context: invoke_context.clone(), + loader_id, + }), + ); + bind_feature_gated_syscall_context_object!( + vm, + is_sysvar_via_syscall_active, + Box::new(SyscallGetRentSysvar { + invoke_context: invoke_context.clone(), + loader_id, + }), + ); + + // Cross-program invocation syscalls vm.bind_syscall_context_object( Box::new(SyscallInvokeSignedC { callers_keyed_accounts, @@ -315,32 +351,53 @@ fn translate_type_inner<'a, T>( access_type: AccessType, vm_addr: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a mut T, EbpfError> { - if loader_id != &bpf_loader_deprecated::id() - && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 + if !enforce_aligned_host_addrs + && loader_id != &bpf_loader_deprecated::id() + && (vm_addr as *mut T).align_offset(align_of::()) != 0 { - Err(SyscallError::UnalignedPointer.into()) - } else { - unsafe { - translate(memory_mapping, access_type, vm_addr, size_of::() as u64) - .map(|value| &mut *(value as *mut T)) - } + return Err(SyscallError::UnalignedPointer.into()); + } + + let host_addr = translate(memory_mapping, access_type, vm_addr, size_of::() as u64)?; + + if enforce_aligned_host_addrs + && loader_id != &bpf_loader_deprecated::id() + && (host_addr as *mut T).align_offset(align_of::()) != 0 + { 
+ return Err(SyscallError::UnalignedPointer.into()); } + Ok(unsafe { &mut *(host_addr as *mut T) }) } fn translate_type_mut<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a mut T, EbpfError> { - translate_type_inner::(memory_mapping, AccessType::Store, vm_addr, loader_id) + translate_type_inner::( + memory_mapping, + AccessType::Store, + vm_addr, + loader_id, + enforce_aligned_host_addrs, + ) } fn translate_type<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a T, EbpfError> { - translate_type_inner::(memory_mapping, AccessType::Load, vm_addr, loader_id) - .map(|value| &*value) + translate_type_inner::( + memory_mapping, + AccessType::Load, + vm_addr, + loader_id, + enforce_aligned_host_addrs, + ) + .map(|value| &*value) } fn translate_slice_inner<'a, T>( @@ -349,41 +406,65 @@ fn translate_slice_inner<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a mut [T], EbpfError> { - if loader_id != &bpf_loader_deprecated::id() + if !enforce_aligned_host_addrs + && loader_id != &bpf_loader_deprecated::id() && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 { - Err(SyscallError::UnalignedPointer.into()) - } else if len == 0 { - Ok(&mut []) - } else { - match translate( - memory_mapping, - access_type, - vm_addr, - len.saturating_mul(size_of::() as u64), - ) { - Ok(value) => Ok(unsafe { from_raw_parts_mut(value as *mut T, len as usize) }), - Err(e) => Err(e), - } + return Err(SyscallError::UnalignedPointer.into()); } + if len == 0 { + return Ok(&mut []); + } + + let host_addr = translate( + memory_mapping, + access_type, + vm_addr, + len.saturating_mul(size_of::() as u64), + )?; + + if enforce_aligned_host_addrs + && loader_id != &bpf_loader_deprecated::id() + && (host_addr as *mut T).align_offset(align_of::()) != 0 + { + return 
Err(SyscallError::UnalignedPointer.into()); + } + Ok(unsafe { from_raw_parts_mut(host_addr as *mut T, len as usize) }) } fn translate_slice_mut<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, len: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a mut [T], EbpfError> { - translate_slice_inner::(memory_mapping, AccessType::Store, vm_addr, len, loader_id) + translate_slice_inner::( + memory_mapping, + AccessType::Store, + vm_addr, + len, + loader_id, + enforce_aligned_host_addrs, + ) } fn translate_slice<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, len: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> Result<&'a [T], EbpfError> { - translate_slice_inner::(memory_mapping, AccessType::Load, vm_addr, len, loader_id) - .map(|value| &*value) + translate_slice_inner::( + memory_mapping, + AccessType::Load, + vm_addr, + len, + loader_id, + enforce_aligned_host_addrs, + ) + .map(|value| &*value) } /// Take a virtual pointer to a string (points to BPF VM memory space), translate it @@ -393,9 +474,16 @@ fn translate_string_and_do( addr: u64, len: u64, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, work: &mut dyn FnMut(&str) -> Result>, ) -> Result> { - let buf = translate_slice::(memory_mapping, addr, len, loader_id)?; + let buf = translate_slice::( + memory_mapping, + addr, + len, + loader_id, + enforce_aligned_host_addrs, + )?; let i = match buf.iter().position(|byte| *byte == 0) { Some(i) => i, None => len as usize, @@ -430,8 +518,9 @@ impl SyscallObject for SyscallAbort { /// Causes the BPF program to be halted immediately /// Log a user's info message pub struct SyscallPanic<'a> { - compute_meter: Option>>, + compute_meter: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallPanic<'a> { fn call( @@ -444,14 +533,13 @@ impl<'a> SyscallObject for SyscallPanic<'a> { memory_mapping: &MemoryMapping, result: &mut Result>, ) { - if let Some(ref mut compute_meter) = 
self.compute_meter { - question_mark!(compute_meter.consume(len), result); - } + question_mark!(self.compute_meter.consume(len), result); *result = translate_string_and_do( memory_mapping, file, len, &self.loader_id, + self.enforce_aligned_host_addrs, &mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()), ); } @@ -459,11 +547,10 @@ impl<'a> SyscallObject for SyscallPanic<'a> { /// Log a user's info message pub struct SyscallLog<'a> { - per_byte_cost: bool, - cost: u64, compute_meter: Rc>, logger: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallLog<'a> { fn call( @@ -476,17 +563,14 @@ impl<'a> SyscallObject for SyscallLog<'a> { memory_mapping: &MemoryMapping, result: &mut Result>, ) { - if self.per_byte_cost { - question_mark!(self.compute_meter.consume(len), result); - } else { - question_mark!(self.compute_meter.consume(self.cost), result); - } + question_mark!(self.compute_meter.consume(len), result); question_mark!( translate_string_and_do( memory_mapping, addr, len, &self.loader_id, + self.enforce_aligned_host_addrs, &mut |string: &str| { stable_log::program_log(&self.logger, string); Ok(0) @@ -567,6 +651,7 @@ pub struct SyscallLogPubkey<'a> { compute_meter: Rc>, logger: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallLogPubkey<'a> { fn call( @@ -581,7 +666,12 @@ impl<'a> SyscallObject for SyscallLogPubkey<'a> { ) { question_mark!(self.compute_meter.consume(self.cost), result); let pubkey = question_mark!( - translate_type::(memory_mapping, pubkey_addr, self.loader_id), + translate_type::( + memory_mapping, + pubkey_addr, + self.loader_id, + self.enforce_aligned_host_addrs, + ), result ); stable_log::program_log(&self.logger, &pubkey.to_string()); @@ -640,9 +730,15 @@ fn translate_program_address_inputs<'a>( program_id_addr: u64, memory_mapping: &MemoryMapping, loader_id: &Pubkey, + enforce_aligned_host_addrs: bool, ) -> 
Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { - let untranslated_seeds = - translate_slice::<&[&u8]>(memory_mapping, seeds_addr, seeds_len, loader_id)?; + let untranslated_seeds = translate_slice::<&[&u8]>( + memory_mapping, + seeds_addr, + seeds_len, + loader_id, + enforce_aligned_host_addrs, + )?; if untranslated_seeds.len() > MAX_SEEDS { return Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()); } @@ -654,10 +750,16 @@ fn translate_program_address_inputs<'a>( untranslated_seed.as_ptr() as *const _ as u64, untranslated_seed.len() as u64, loader_id, + enforce_aligned_host_addrs, ) }) .collect::, EbpfError>>()?; - let program_id = translate_type::(memory_mapping, program_id_addr, loader_id)?; + let program_id = translate_type::( + memory_mapping, + program_id_addr, + loader_id, + enforce_aligned_host_addrs, + )?; Ok((seeds, program_id)) } @@ -666,6 +768,7 @@ struct SyscallCreateProgramAddress<'a> { cost: u64, compute_meter: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { fn call( @@ -685,6 +788,7 @@ impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { program_id_addr, memory_mapping, self.loader_id, + self.enforce_aligned_host_addrs, ), result ); @@ -698,7 +802,13 @@ impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { } }; let address = question_mark!( - translate_slice_mut::(memory_mapping, address_addr, 32, self.loader_id), + translate_slice_mut::( + memory_mapping, + address_addr, + 32, + self.loader_id, + self.enforce_aligned_host_addrs, + ), result ); address.copy_from_slice(new_address.as_ref()); @@ -711,6 +821,7 @@ struct SyscallTryFindProgramAddress<'a> { cost: u64, compute_meter: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { fn call( @@ -730,6 +841,7 @@ impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { program_id_addr, memory_mapping, 
self.loader_id, + self.enforce_aligned_host_addrs, ), result ); @@ -745,11 +857,22 @@ impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { Pubkey::create_program_address(&seeds_with_bump, program_id) { let bump_seed_ref = question_mark!( - translate_type_mut::(memory_mapping, bump_seed_addr, self.loader_id), + translate_type_mut::( + memory_mapping, + bump_seed_addr, + self.loader_id, + self.enforce_aligned_host_addrs, + ), result ); let address = question_mark!( - translate_slice_mut::(memory_mapping, address_addr, 32, self.loader_id), + translate_slice_mut::( + memory_mapping, + address_addr, + 32, + self.loader_id, + self.enforce_aligned_host_addrs, + ), result ); *bump_seed_ref = bump_seed[0]; @@ -770,6 +893,7 @@ pub struct SyscallSha256<'a> { sha256_byte_cost: u64, compute_meter: Rc>, loader_id: &'a Pubkey, + enforce_aligned_host_addrs: bool, } impl<'a> SyscallObject for SyscallSha256<'a> { fn call( @@ -788,14 +912,21 @@ impl<'a> SyscallObject for SyscallSha256<'a> { memory_mapping, result_addr, HASH_BYTES as u64, - self.loader_id + self.loader_id, + self.enforce_aligned_host_addrs, ), result ); let mut hasher = Hasher::default(); if vals_len > 0 { let vals = question_mark!( - translate_slice::<&[u8]>(memory_mapping, vals_addr, vals_len, self.loader_id), + translate_slice::<&[u8]>( + memory_mapping, + vals_addr, + vals_len, + self.loader_id, + self.enforce_aligned_host_addrs, + ), result ); for val in vals.iter() { @@ -804,7 +935,8 @@ impl<'a> SyscallObject for SyscallSha256<'a> { memory_mapping, val.as_ptr() as u64, val.len() as u64, - self.loader_id + self.loader_id, + self.enforce_aligned_host_addrs, ), result ); @@ -821,39 +953,189 @@ impl<'a> SyscallObject for SyscallSha256<'a> { } } -/// Ristretto point multiply -pub struct SyscallRistrettoMul<'a> { - cost: u64, - compute_meter: Rc>, +fn get_sysvar( + id: &Pubkey, + var_addr: u64, + loader_id: &Pubkey, + memory_mapping: &MemoryMapping, + invoke_context: Rc>, +) -> Result> { + let invoke_context 
= invoke_context + .try_borrow() + .map_err(|_| SyscallError::InvokeContextBorrowFailed)?; + + invoke_context.get_compute_meter().consume( + invoke_context.get_bpf_compute_budget().sysvar_base_cost + size_of::() as u64, + )?; + let var = translate_type_mut::( + memory_mapping, + var_addr, + loader_id, + invoke_context.is_feature_active(&enforce_aligned_host_addrs::id()), + )?; + + *var = process_instruction::get_sysvar::(*invoke_context, id) + .map_err(SyscallError::InstructionError)?; + + Ok(SUCCESS) +} + +/// Get a Clock sysvar +struct SyscallGetClockSysvar<'a> { + invoke_context: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallRistrettoMul<'a> { +impl<'a> SyscallObject for SyscallGetClockSysvar<'a> { fn call( &mut self, - point_addr: u64, - scalar_addr: u64, - result_addr: u64, + var_addr: u64, + _arg2: u64, + _arg3: u64, _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, result: &mut Result>, ) { - question_mark!(self.compute_meter.consume(self.cost), result); - - let point = question_mark!( - translate_type::(memory_mapping, point_addr, self.loader_id), - result + *result = get_sysvar::( + &sysvar::clock::id(), + var_addr, + self.loader_id, + memory_mapping, + self.invoke_context.clone(), ); - let scalar = question_mark!( - translate_type::(memory_mapping, scalar_addr, self.loader_id), - result + } +} +/// Get a EpochSchedule sysvar +struct SyscallGetEpochScheduleSysvar<'a> { + invoke_context: Rc>, + loader_id: &'a Pubkey, +} +impl<'a> SyscallObject for SyscallGetEpochScheduleSysvar<'a> { + fn call( + &mut self, + var_addr: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result>, + ) { + *result = get_sysvar::( + &sysvar::epoch_schedule::id(), + var_addr, + self.loader_id, + memory_mapping, + self.invoke_context.clone(), ); - let output = question_mark!( - translate_type_mut::(memory_mapping, result_addr, self.loader_id), - result + } +} +/// Get a Fees sysvar +struct 
SyscallGetFeesSysvar<'a> { + invoke_context: Rc>, + loader_id: &'a Pubkey, +} +impl<'a> SyscallObject for SyscallGetFeesSysvar<'a> { + fn call( + &mut self, + var_addr: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result>, + ) { + *result = get_sysvar::( + &sysvar::fees::id(), + var_addr, + self.loader_id, + memory_mapping, + self.invoke_context.clone(), + ); + } +} +/// Get a Rent sysvar +struct SyscallGetRentSysvar<'a> { + invoke_context: Rc>, + loader_id: &'a Pubkey, +} +impl<'a> SyscallObject for SyscallGetRentSysvar<'a> { + fn call( + &mut self, + var_addr: u64, + _arg2: u64, + _arg3: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result>, + ) { + *result = get_sysvar::( + &sysvar::rent::id(), + var_addr, + self.loader_id, + memory_mapping, + self.invoke_context.clone(), ); - *output = point * scalar; + } +} +// Keccak256 +pub struct SyscallKeccak256<'a> { + base_cost: u64, + byte_cost: u64, + compute_meter: Rc>, + loader_id: &'a Pubkey, +} +impl<'a> SyscallObject for SyscallKeccak256<'a> { + fn call( + &mut self, + vals_addr: u64, + vals_len: u64, + result_addr: u64, + _arg4: u64, + _arg5: u64, + memory_mapping: &MemoryMapping, + result: &mut Result>, + ) { + question_mark!(self.compute_meter.consume(self.base_cost), result); + let hash_result = question_mark!( + translate_slice_mut::( + memory_mapping, + result_addr, + keccak::HASH_BYTES as u64, + self.loader_id, + true, + ), + result + ); + let mut hasher = keccak::Hasher::default(); + if vals_len > 0 { + let vals = question_mark!( + translate_slice::<&[u8]>(memory_mapping, vals_addr, vals_len, self.loader_id, true), + result + ); + for val in vals.iter() { + let bytes = question_mark!( + translate_slice::( + memory_mapping, + val.as_ptr() as u64, + val.len() as u64, + self.loader_id, + true, + ), + result + ); + question_mark!( + self.compute_meter + .consume(self.byte_cost * (val.len() as u64 / 2)), + 
result + ); + hasher.hash(bytes); + } + } + hash_result.copy_from_slice(&hasher.result().to_bytes()); *result = Ok(0); } } @@ -868,9 +1150,12 @@ struct AccountReferences<'a> { ref_to_len_in_vm: &'a mut u64, serialized_len_ptr: &'a mut u64, } -type TranslatedAccount<'a> = (Rc>, Option>); +type TranslatedAccount<'a> = ( + Rc>, + Option>, +); type TranslatedAccounts<'a> = ( - Vec>>, + Vec>>, Vec>>, ); @@ -883,10 +1168,12 @@ trait SyscallInvokeSigned<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result>; fn translate_accounts( &self, account_keys: &[Pubkey], + caller_write_privileges: &[bool], program_account_index: usize, account_infos_addr: u64, account_infos_len: u64, @@ -898,6 +1185,7 @@ trait SyscallInvokeSigned<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result, EbpfError>; } @@ -925,8 +1213,14 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result> { - let ix = translate_type::(memory_mapping, addr, self.loader_id)?; + let ix = translate_type::( + memory_mapping, + addr, + self.loader_id, + enforce_aligned_host_addrs, + )?; check_instruction_size( ix.accounts.len(), @@ -939,6 +1233,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { ix.accounts.as_ptr() as u64, ix.accounts.len() as u64, self.loader_id, + enforce_aligned_host_addrs, )? .to_vec(); let data = translate_slice::( @@ -946,6 +1241,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { ix.data.as_ptr() as u64, ix.data.len() as u64, self.loader_id, + enforce_aligned_host_addrs, )? 
.to_vec(); Ok(Instruction { @@ -958,18 +1254,22 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { fn translate_accounts( &self, account_keys: &[Pubkey], + caller_write_privileges: &[bool], program_account_index: usize, account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); + let enforce_aligned_host_addrs = + invoke_context.is_feature_active(&enforce_aligned_host_addrs::id()); let account_infos = translate_slice::( memory_mapping, account_infos_addr, account_infos_len, self.loader_id, + enforce_aligned_host_addrs, )?; check_account_infos(account_infos.len(), &invoke_context)?; let account_info_keys = account_infos @@ -979,6 +1279,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { memory_mapping, account_info.key as *const _ as u64, self.loader_id, + enforce_aligned_host_addrs, ) }) .collect::, EbpfError>>()?; @@ -993,13 +1294,20 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { memory_mapping, account_info.lamports.as_ptr() as u64, self.loader_id, + enforce_aligned_host_addrs, )?; - translate_type_mut::(memory_mapping, *ptr, self.loader_id)? + translate_type_mut::( + memory_mapping, + *ptr, + self.loader_id, + enforce_aligned_host_addrs, + )? 
}; let owner = translate_type_mut::( memory_mapping, account_info.owner as *const _ as u64, self.loader_id, + enforce_aligned_host_addrs, )?; let (data, vm_data_addr, ref_to_len_in_vm, serialized_len_ptr) = { @@ -1008,6 +1316,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { memory_mapping, account_info.data.as_ptr() as *const _ as u64, self.loader_id, + enforce_aligned_host_addrs, )?; if invoke_context.is_feature_active(&cpi_data_cost::id()) { @@ -1029,6 +1338,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { memory_mapping, ref_of_len_in_input_buffer as *const _ as u64, self.loader_id, + enforce_aligned_host_addrs, )?; let vm_data_addr = data.as_ptr() as u64; ( @@ -1037,6 +1347,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { vm_data_addr, data.len() as u64, self.loader_id, + enforce_aligned_host_addrs, )?, vm_data_addr, ref_to_len_in_vm, @@ -1045,13 +1356,13 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { }; Ok(( - Rc::new(RefCell::new(Account { + Rc::new(RefCell::new(AccountSharedData::from(Account { lamports: *lamports, data: data.to_vec(), executable: account_info.executable, owner: *owner, rent_epoch: account_info.rent_epoch, - })), + }))), Some(AccountReferences { lamports, owner, @@ -1065,6 +1376,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { get_translated_accounts( account_keys, + caller_write_privileges, program_account_index, &account_info_keys, account_infos, @@ -1079,6 +1391,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result, EbpfError> { let mut signers = Vec::new(); if signers_seeds_len > 0 { @@ -1087,6 +1400,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { signers_seeds_addr, signers_seeds_len, self.loader_id, + enforce_aligned_host_addrs, )?; if signers_seeds.len() 
> MAX_SIGNERS { return Err(SyscallError::TooManySigners.into()); @@ -1097,6 +1411,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { signer_seeds.as_ptr() as *const _ as u64, signer_seeds.len() as u64, self.loader_id, + enforce_aligned_host_addrs, )?; if untranslated_seeds.len() > MAX_SEEDS { return Err(SyscallError::InstructionError( @@ -1112,6 +1427,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { untranslated_seed.as_ptr() as *const _ as u64, untranslated_seed.len() as u64, self.loader_id, + enforce_aligned_host_addrs, ) }) .collect::, EbpfError>>()?; @@ -1220,34 +1536,50 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result> { - let ix_c = translate_type::(memory_mapping, addr, self.loader_id)?; + let ix_c = translate_type::( + memory_mapping, + addr, + self.loader_id, + enforce_aligned_host_addrs, + )?; check_instruction_size( ix_c.accounts_len, ix_c.data_len, &self.invoke_context.borrow(), )?; - let program_id = - translate_type::(memory_mapping, ix_c.program_id_addr, self.loader_id)?; + let program_id = translate_type::( + memory_mapping, + ix_c.program_id_addr, + self.loader_id, + enforce_aligned_host_addrs, + )?; let meta_cs = translate_slice::( memory_mapping, ix_c.accounts_addr, ix_c.accounts_len as u64, self.loader_id, + enforce_aligned_host_addrs, )?; let data = translate_slice::( memory_mapping, ix_c.data_addr, ix_c.data_len as u64, self.loader_id, + enforce_aligned_host_addrs, )? 
.to_vec(); let accounts = meta_cs .iter() .map(|meta_c| { - let pubkey = - translate_type::(memory_mapping, meta_c.pubkey_addr, self.loader_id)?; + let pubkey = translate_type::( + memory_mapping, + meta_c.pubkey_addr, + self.loader_id, + enforce_aligned_host_addrs, + )?; Ok(AccountMeta { pubkey: *pubkey, is_signer: meta_c.is_signer, @@ -1266,24 +1598,33 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { fn translate_accounts( &self, account_keys: &[Pubkey], + caller_write_privileges: &[bool], program_account_index: usize, account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); + let enforce_aligned_host_addrs = + invoke_context.is_feature_active(&enforce_aligned_host_addrs::id()); let account_infos = translate_slice::( memory_mapping, account_infos_addr, account_infos_len, self.loader_id, + enforce_aligned_host_addrs, )?; check_account_infos(account_infos.len(), &invoke_context)?; let account_info_keys = account_infos .iter() .map(|account_info| { - translate_type::(memory_mapping, account_info.key_addr, self.loader_id) + translate_type::( + memory_mapping, + account_info.key_addr, + self.loader_id, + enforce_aligned_host_addrs, + ) }) .collect::, EbpfError>>()?; @@ -1295,11 +1636,13 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { memory_mapping, account_info.lamports_addr, self.loader_id, + enforce_aligned_host_addrs, )?; let owner = translate_type_mut::( memory_mapping, account_info.owner_addr, self.loader_id, + enforce_aligned_host_addrs, )?; let vm_data_addr = account_info.data_addr; @@ -1315,6 +1658,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { vm_data_addr, account_info.data_len, self.loader_id, + enforce_aligned_host_addrs, )?; let first_info_addr = &account_infos[0] as *const _ as u64; @@ -1334,16 +1678,17 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { memory_mapping, 
ref_of_len_in_input_buffer as *const _ as u64, self.loader_id, + enforce_aligned_host_addrs, )?; Ok(( - Rc::new(RefCell::new(Account { + Rc::new(RefCell::new(AccountSharedData::from(Account { lamports: *lamports, data: data.to_vec(), executable: account_info.executable, owner: *owner, rent_epoch: account_info.rent_epoch, - })), + }))), Some(AccountReferences { lamports, owner, @@ -1357,6 +1702,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { get_translated_accounts( account_keys, + caller_write_privileges, program_account_index, &account_info_keys, account_infos, @@ -1371,6 +1717,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, + enforce_aligned_host_addrs: bool, ) -> Result, EbpfError> { if signers_seeds_len > 0 { let signers_seeds = translate_slice::( @@ -1378,6 +1725,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { signers_seeds_addr, signers_seeds_len, self.loader_id, + enforce_aligned_host_addrs, )?; if signers_seeds.len() > MAX_SIGNERS { return Err(SyscallError::TooManySigners.into()); @@ -1390,6 +1738,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { signer_seeds.addr, signer_seeds.len, self.loader_id, + enforce_aligned_host_addrs, )?; if seeds.len() > MAX_SEEDS { return Err(SyscallError::InstructionError( @@ -1405,6 +1754,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { seed.addr, seed.len, self.loader_id, + enforce_aligned_host_addrs, ) }) .collect::, EbpfError>>()?; @@ -1442,6 +1792,7 @@ impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { fn get_translated_accounts<'a, T, F>( account_keys: &[Pubkey], + caller_write_privileges: &[bool], program_account_index: usize, account_info_keys: &[&Pubkey], account_infos: &[T], @@ -1463,13 +1814,13 @@ where SyscallError::InstructionError(InstructionError::MissingAccount) })?; - if 
(invoke_context.is_feature_active(&use_loaded_program_accounts::id()) - && i == program_account_index) - || (invoke_context.is_feature_active(&use_loaded_executables::id()) - && account.borrow().executable) + if i == program_account_index + || account.borrow().executable + || (invoke_context.is_feature_active(&cpi_share_ro_and_exec_accounts::id()) + && !caller_write_privileges[i]) { - // Use the known executable - accounts.push(Rc::new(account)); + // Use the known account + accounts.push(account); refs.push(None); } else if let Some(account_info) = account_info_keys @@ -1524,7 +1875,6 @@ fn check_account_infos( > invoke_context .get_bpf_compute_budget() .max_cpi_instruction_size - && invoke_context.is_feature_active(&use_loaded_program_accounts::id()) { // Cap the number of account_infos a caller can pass to approximate // maximum that accounts that could be passed in an instruction @@ -1536,23 +1886,28 @@ fn check_account_infos( fn check_authorized_program( program_id: &Pubkey, instruction_data: &[u8], + invoke_context: &Ref<&mut dyn InvokeContext>, ) -> Result<(), EbpfError> { if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) || (bpf_loader_upgradeable::check_id(program_id) - && !bpf_loader_upgradeable::is_upgrade_instruction(instruction_data)) + && !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) + || (bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) + && invoke_context + .is_feature_active(&set_upgrade_authority_via_cpi_enabled::id())))) { return Err(SyscallError::ProgramNotSupported(*program_id).into()); } Ok(()) } +#[allow(clippy::type_complexity)] fn get_upgradeable_executable( callee_program_id: &Pubkey, - program_account: &RefCell, + program_account: &Rc>, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result)>, EbpfError> { +) -> Result>)>, EbpfError> { if program_account.borrow().owner == bpf_loader_upgradeable::id() { match 
program_account.borrow().state() { Ok(UpgradeableLoaderState::Program { @@ -1598,8 +1953,8 @@ fn call<'a>( executables, accounts, account_refs, - caller_privileges, - abort_on_all_cpi_failures, + caller_write_privileges, + demote_sysvar_write_locks, ) = { let invoke_context = syscall.get_context()?; @@ -1607,18 +1962,26 @@ fn call<'a>( .get_compute_meter() .consume(invoke_context.get_bpf_compute_budget().invoke_units)?; + let enforce_aligned_host_addrs = + invoke_context.is_feature_active(&enforce_aligned_host_addrs::id()); + let caller_program_id = invoke_context .get_caller() .map_err(SyscallError::InstructionError)?; // Translate and verify caller's data - let instruction = syscall.translate_instruction(instruction_addr, &memory_mapping)?; + let instruction = syscall.translate_instruction( + instruction_addr, + &memory_mapping, + enforce_aligned_host_addrs, + )?; let signers = syscall.translate_signers( caller_program_id, signers_seeds_addr, signers_seeds_len, memory_mapping, + enforce_aligned_host_addrs, )?; let keyed_account_refs = syscall .get_callers_keyed_accounts() @@ -1632,7 +1995,7 @@ fn call<'a>( &invoke_context, ) .map_err(SyscallError::InstructionError)?; - let caller_privileges = message + let caller_write_privileges = message .account_keys .iter() .map(|key| { @@ -1646,11 +2009,10 @@ fn call<'a>( } }) .collect::>(); - if invoke_context.is_feature_active(&limit_cpi_loader_invoke::id()) { - check_authorized_program(&callee_program_id, &instruction.data)?; - } + check_authorized_program(&callee_program_id, &instruction.data, &invoke_context)?; let (accounts, account_refs) = syscall.translate_accounts( &message.account_keys, + &caller_write_privileges, callee_program_id_index, account_infos_addr, account_infos_len, @@ -1659,11 +2021,13 @@ fn call<'a>( // Construct executables - let program_account = (**accounts.get(callee_program_id_index).ok_or_else(|| { - ic_msg!(invoke_context, "Unknown program {}", callee_program_id,); - 
SyscallError::InstructionError(InstructionError::MissingAccount) - })?) - .clone(); + let program_account = accounts + .get(callee_program_id_index) + .ok_or_else(|| { + ic_msg!(invoke_context, "Unknown program {}", callee_program_id,); + SyscallError::InstructionError(InstructionError::MissingAccount) + })? + .clone(); let programdata_executable = get_upgradeable_executable(&callee_program_id, &program_account, &invoke_context)?; let mut executables = vec![(callee_program_id, program_account)]; @@ -1680,8 +2044,8 @@ fn call<'a>( executables, accounts, account_refs, - caller_privileges, - invoke_context.is_feature_active(&abort_on_all_cpi_failures::id()), + caller_write_privileges, + invoke_context.is_feature_active(&demote_sysvar_write_locks::id()), ) }; @@ -1692,19 +2056,12 @@ fn call<'a>( &message, &executables, &accounts, - &caller_privileges, + &caller_write_privileges, *(&mut *(syscall.get_context_mut()?)), ) { Ok(()) => (), Err(err) => { - if abort_on_all_cpi_failures { - return Err(SyscallError::InstructionError(err).into()); - } else { - match ProgramError::try_from(err) { - Ok(err) => return Ok(err.into()), - Err(err) => return Err(SyscallError::InstructionError(err).into()), - } - } + return Err(SyscallError::InstructionError(err).into()); } } @@ -1713,11 +2070,11 @@ fn call<'a>( let invoke_context = syscall.get_context()?; for (i, (account, account_ref)) in accounts.iter().zip(account_refs).enumerate() { let account = account.borrow(); - if let Some(account_ref) = account_ref { - if message.is_writable(i) && !account.executable { + if let Some(mut account_ref) = account_ref { + if message.is_writable(i, demote_sysvar_write_locks) && !account.executable { *account_ref.lamports = account.lamports; *account_ref.owner = account.owner; - if account_ref.data.len() != account.data.len() { + if account_ref.data.len() != account.data().len() { if !account_ref.data.is_empty() { // Only support for `CreateAccount` at this time. 
// Need a way to limit total realloc size across multiple CPI calls @@ -1730,7 +2087,8 @@ fn call<'a>( ) .into()); } - if account.data.len() > account_ref.data.len() + MAX_PERMITTED_DATA_INCREASE + if account.data().len() + > account_ref.data.len() + MAX_PERMITTED_DATA_INCREASE { ic_msg!( invoke_context, @@ -1742,18 +2100,28 @@ fn call<'a>( ) .into()); } - let _ = translate( - memory_mapping, - AccessType::Store, - account_ref.vm_data_addr, - account.data.len() as u64, - )?; - *account_ref.ref_to_len_in_vm = account.data.len() as u64; - *account_ref.serialized_len_ptr = account.data.len() as u64; + if invoke_context.is_feature_active(&update_data_on_realloc::id()) { + account_ref.data = translate_slice_mut::( + memory_mapping, + account_ref.vm_data_addr, + account.data().len() as u64, + &bpf_loader_deprecated::id(), // Don't care since it is byte aligned + true, + )?; + } else { + let _ = translate( + memory_mapping, + AccessType::Store, + account_ref.vm_data_addr, + account.data().len() as u64, + )?; + } + *account_ref.ref_to_len_in_vm = account.data().len() as u64; + *account_ref.serialized_len_ptr = account.data().len() as u64; } account_ref .data - .clone_from_slice(&account.data[0..account_ref.data.len()]); + .copy_from_slice(&account.data()[0..account_ref.data.len()]); } } } @@ -1765,11 +2133,14 @@ fn call<'a>( #[cfg(test)] mod tests { use super::*; - use solana_rbpf::{memory_region::MemoryRegion, user_error::UserError, vm::Config}; + use solana_rbpf::{ + ebpf::HOST_ALIGN, memory_region::MemoryRegion, user_error::UserError, vm::Config, + }; use solana_sdk::{ bpf_loader, + fee_calculator::FeeCalculator, hash::hashv, - process_instruction::{MockComputeMeter, MockLogger}, + process_instruction::{MockComputeMeter, MockInvokeContext, MockLogger}, }; use std::str::FromStr; @@ -1820,11 +2191,11 @@ mod tests { for (ok, start, length, value) in cases { if ok { assert_eq!( - translate(&memory_mapping, AccessType::Load, start, length,).unwrap(), + 
translate(&memory_mapping, AccessType::Load, start, length).unwrap(), value ) } else { - assert!(translate(&memory_mapping, AccessType::Load, start, length,).is_err()) + assert!(translate(&memory_mapping, AccessType::Load, start, length).is_err()) } } } @@ -1846,17 +2217,17 @@ mod tests { ) .unwrap(); let translated_pubkey = - translate_type::(&memory_mapping, 100, &bpf_loader::id()).unwrap(); + translate_type::(&memory_mapping, 100, &bpf_loader::id(), true).unwrap(); assert_eq!(pubkey, *translated_pubkey); // Instruction - let instruction = Instruction::new( + let instruction = Instruction::new_with_bincode( solana_sdk::pubkey::new_rand(), &"foobar", vec![AccountMeta::new(solana_sdk::pubkey::new_rand(), false)], ); let addr = &instruction as *const _ as u64; - let memory_mapping = MemoryMapping::new::( + let mut memory_mapping = MemoryMapping::new::( vec![MemoryRegion { host_addr: addr, vm_addr: 96, @@ -1868,12 +2239,12 @@ mod tests { ) .unwrap(); let translated_instruction = - translate_type::(&memory_mapping, 96, &bpf_loader::id()).unwrap(); + translate_type::(&memory_mapping, 96, &bpf_loader::id(), true).unwrap(); assert_eq!(instruction, *translated_instruction); - // TODO: Reenable when solana_rbpf is bumped to "0.2.8" or higher - // Text search anchor so that it is easier to find: solana_rbpf = "=0.2.7" - // memory_mapping.resize_region::(0, 1).unwrap(); - // assert!(translate_type::(&memory_mapping, 100, &bpf_loader::id()).is_err()); + memory_mapping.resize_region::(0, 1).unwrap(); + assert!( + translate_type::(&memory_mapping, 100, &bpf_loader::id(), true).is_err() + ); } #[test] @@ -1894,9 +2265,14 @@ mod tests { &DEFAULT_CONFIG, ) .unwrap(); - let translated_data = - translate_slice::(&memory_mapping, data.as_ptr() as u64, 0, &bpf_loader::id()) - .unwrap(); + let translated_data = translate_slice::( + &memory_mapping, + data.as_ptr() as u64, + 0, + &bpf_loader::id(), + true, + ) + .unwrap(); assert_eq!(data, translated_data); assert_eq!(0, 
translated_data.len()); @@ -1914,9 +2290,14 @@ mod tests { &DEFAULT_CONFIG, ) .unwrap(); - let translated_data = - translate_slice::(&memory_mapping, 100, data.len() as u64, &bpf_loader::id()) - .unwrap(); + let translated_data = translate_slice::( + &memory_mapping, + 100, + data.len() as u64, + &bpf_loader::id(), + true, + ) + .unwrap(); assert_eq!(data, translated_data); data[0] = 10; assert_eq!(data, translated_data); @@ -1924,7 +2305,8 @@ mod tests { &memory_mapping, data.as_ptr() as u64, u64::MAX, - &bpf_loader::id() + &bpf_loader::id(), + true, ) .is_err()); @@ -1932,7 +2314,8 @@ mod tests { &memory_mapping, 100 - 1, data.len() as u64, - &bpf_loader::id() + &bpf_loader::id(), + true, ) .is_err()); @@ -1950,13 +2333,21 @@ mod tests { &DEFAULT_CONFIG, ) .unwrap(); - let translated_data = - translate_slice::(&memory_mapping, 96, data.len() as u64, &bpf_loader::id()) - .unwrap(); + let translated_data = translate_slice::( + &memory_mapping, + 96, + data.len() as u64, + &bpf_loader::id(), + true, + ) + .unwrap(); assert_eq!(data, translated_data); data[0] = 10; assert_eq!(data, translated_data); - assert!(translate_slice::(&memory_mapping, 96, u64::MAX, &bpf_loader::id(),).is_err()); + assert!( + translate_slice::(&memory_mapping, 96, u64::MAX, &bpf_loader::id(), true,) + .is_err() + ); // Pubkeys let mut data = vec![solana_sdk::pubkey::new_rand(); 5]; @@ -1972,9 +2363,14 @@ mod tests { &DEFAULT_CONFIG, ) .unwrap(); - let translated_data = - translate_slice::(&memory_mapping, 100, data.len() as u64, &bpf_loader::id()) - .unwrap(); + let translated_data = translate_slice::( + &memory_mapping, + 100, + data.len() as u64, + &bpf_loader::id(), + true, + ) + .unwrap(); assert_eq!(data, translated_data); data[0] = solana_sdk::pubkey::new_rand(); // Both should point to same place assert_eq!(data, translated_data); @@ -2002,6 +2398,7 @@ mod tests { 100, string.len() as u64, &bpf_loader::id(), + true, &mut |string: &str| { assert_eq!(string, "Gaggablaghblagh!"); Ok(42) 
@@ -2053,8 +2450,9 @@ mod tests { remaining: string.len() as u64 - 1, })); let mut syscall_panic = SyscallPanic { - compute_meter: Some(compute_meter), + compute_meter, loader_id: &bpf_loader::id(), + enforce_aligned_host_addrs: true, }; let mut result: Result> = Ok(0); syscall_panic.call( @@ -2073,9 +2471,14 @@ mod tests { result ); + let compute_meter: Rc> = + Rc::new(RefCell::new(MockComputeMeter { + remaining: string.len() as u64, + })); let mut syscall_panic = SyscallPanic { - compute_meter: None, + compute_meter, loader_id: &bpf_loader::id(), + enforce_aligned_host_addrs: true, }; let mut result: Result> = Ok(0); syscall_panic.call( @@ -2096,16 +2499,15 @@ mod tests { let addr = string.as_ptr() as *const _ as u64; let compute_meter: Rc> = - Rc::new(RefCell::new(MockComputeMeter { remaining: 3 })); + Rc::new(RefCell::new(MockComputeMeter { remaining: 1000000 })); let log = Rc::new(RefCell::new(vec![])); let logger: Rc> = Rc::new(RefCell::new(MockLogger { log: log.clone() })); let mut syscall_sol_log = SyscallLog { - per_byte_cost: false, - cost: 1, compute_meter, logger, loader_id: &bpf_loader::id(), + enforce_aligned_host_addrs: true, }; let memory_mapping = MemoryMapping::new::( vec![MemoryRegion { @@ -2165,12 +2567,6 @@ mod tests { &memory_mapping, &mut result, ); - assert_eq!( - Err(EbpfError::UserError(BpfError::SyscallError( - SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) - ))), - result - ); let compute_meter: Rc> = Rc::new(RefCell::new(MockComputeMeter { @@ -2178,11 +2574,10 @@ mod tests { })); let logger: Rc> = Rc::new(RefCell::new(MockLogger { log })); let mut syscall_sol_log = SyscallLog { - per_byte_cost: true, - cost: 1, compute_meter, logger, loader_id: &bpf_loader::id(), + enforce_aligned_host_addrs: true, }; let mut result: Result> = Ok(0); syscall_sol_log.call( @@ -2252,6 +2647,7 @@ mod tests { compute_meter, logger, loader_id: &bpf_loader::id(), + enforce_aligned_host_addrs: true, }; let memory_mapping = 
MemoryMapping::new::( vec![MemoryRegion { @@ -2298,9 +2694,14 @@ mod tests { fn test_syscall_sol_alloc_free() { // large alloc { - let heap = vec![0_u8; 100]; + let heap = AlignedMemory::new_with_size(100, HOST_ALIGN); let memory_mapping = MemoryMapping::new::( - vec![MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true)], + vec![MemoryRegion::new_from_slice( + heap.as_slice(), + MM_HEAP_START, + 0, + true, + )], &DEFAULT_CONFIG, ) .unwrap(); @@ -2320,9 +2721,14 @@ mod tests { } // many small unaligned allocs { - let heap = vec![0_u8; 100]; + let heap = AlignedMemory::new_with_size(100, HOST_ALIGN); let memory_mapping = MemoryMapping::new::( - vec![MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true)], + vec![MemoryRegion::new_from_slice( + heap.as_slice(), + MM_HEAP_START, + 0, + true, + )], &DEFAULT_CONFIG, ) .unwrap(); @@ -2341,9 +2747,14 @@ mod tests { } // many small aligned allocs { - let heap = vec![0_u8; 100]; + let heap = AlignedMemory::new_with_size(100, HOST_ALIGN); let memory_mapping = MemoryMapping::new::( - vec![MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true)], + vec![MemoryRegion::new_from_slice( + heap.as_slice(), + MM_HEAP_START, + 0, + true, + )], &DEFAULT_CONFIG, ) .unwrap(); @@ -2363,9 +2774,14 @@ mod tests { // aligned allocs fn check_alignment() { - let heap = vec![0_u8; 100]; + let heap = AlignedMemory::new_with_size(100, HOST_ALIGN); let memory_mapping = MemoryMapping::new::( - vec![MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true)], + vec![MemoryRegion::new_from_slice( + heap.as_slice(), + MM_HEAP_START, + 0, + true, + )], &DEFAULT_CONFIG, ) .unwrap(); @@ -2399,6 +2815,7 @@ mod tests { let bytes1 = "Gaggablaghblagh!"; let bytes2 = "flurbos"; + #[allow(dead_code)] struct MockSlice { pub addr: u64, pub len: usize, @@ -2459,6 +2876,7 @@ mod tests { sha256_byte_cost: 2, compute_meter, loader_id: &bpf_loader_deprecated::id(), + enforce_aligned_host_addrs: true, }; let mut result: Result> = Ok(0); @@ -2509,4 
+2927,179 @@ mod tests { result ); } + + #[test] + fn test_syscall_get_sysvar() { + // Test clock sysvar + { + let got_clock = Clock::default(); + let got_clock_va = 2048; + + let memory_mapping = MemoryMapping::new::( + vec![MemoryRegion { + host_addr: &got_clock as *const _ as u64, + vm_addr: got_clock_va, + len: size_of::() as u64, + vm_gap_shift: 63, + is_writable: true, + }], + &DEFAULT_CONFIG, + ) + .unwrap(); + + let src_clock = Clock { + slot: 1, + epoch_start_timestamp: 2, + epoch: 3, + leader_schedule_epoch: 4, + unix_timestamp: 5, + }; + let mut invoke_context = MockInvokeContext::default(); + let mut data = vec![]; + bincode::serialize_into(&mut data, &src_clock).unwrap(); + invoke_context + .sysvars + .push((sysvar::clock::id(), Some(Rc::new(data)))); + + let mut syscall = SyscallGetClockSysvar { + invoke_context: Rc::new(RefCell::new(&mut invoke_context)), + loader_id: &bpf_loader::id(), + }; + let mut result: Result> = Ok(0); + + syscall.call(got_clock_va, 0, 0, 0, 0, &memory_mapping, &mut result); + result.unwrap(); + assert_eq!(got_clock, src_clock); + } + + // Test epoch_schedule sysvar + { + let got_epochschedule = EpochSchedule::default(); + let got_epochschedule_va = 2048; + + let memory_mapping = MemoryMapping::new::( + vec![MemoryRegion { + host_addr: &got_epochschedule as *const _ as u64, + vm_addr: got_epochschedule_va, + len: size_of::() as u64, + vm_gap_shift: 63, + is_writable: true, + }], + &DEFAULT_CONFIG, + ) + .unwrap(); + + let src_epochschedule = EpochSchedule { + slots_per_epoch: 1, + leader_schedule_slot_offset: 2, + warmup: false, + first_normal_epoch: 3, + first_normal_slot: 4, + }; + let mut invoke_context = MockInvokeContext::default(); + let mut data = vec![]; + bincode::serialize_into(&mut data, &src_epochschedule).unwrap(); + invoke_context + .sysvars + .push((sysvar::epoch_schedule::id(), Some(Rc::new(data)))); + + let mut syscall = SyscallGetEpochScheduleSysvar { + invoke_context: Rc::new(RefCell::new(&mut 
invoke_context)), + loader_id: &bpf_loader::id(), + }; + let mut result: Result> = Ok(0); + + syscall.call( + got_epochschedule_va, + 0, + 0, + 0, + 0, + &memory_mapping, + &mut result, + ); + result.unwrap(); + assert_eq!(got_epochschedule, src_epochschedule); + } + + // Test fees sysvar + { + let got_fees = Fees::default(); + let got_fees_va = 2048; + + let memory_mapping = MemoryMapping::new::( + vec![MemoryRegion { + host_addr: &got_fees as *const _ as u64, + vm_addr: got_fees_va, + len: size_of::() as u64, + vm_gap_shift: 63, + is_writable: true, + }], + &DEFAULT_CONFIG, + ) + .unwrap(); + + let src_fees = Fees { + fee_calculator: FeeCalculator { + lamports_per_signature: 1, + }, + }; + let mut invoke_context = MockInvokeContext::default(); + let mut data = vec![]; + bincode::serialize_into(&mut data, &src_fees).unwrap(); + invoke_context + .sysvars + .push((sysvar::fees::id(), Some(Rc::new(data)))); + + let mut syscall = SyscallGetFeesSysvar { + invoke_context: Rc::new(RefCell::new(&mut invoke_context)), + loader_id: &bpf_loader::id(), + }; + let mut result: Result> = Ok(0); + + syscall.call(got_fees_va, 0, 0, 0, 0, &memory_mapping, &mut result); + result.unwrap(); + assert_eq!(got_fees, src_fees); + } + + // Test rent sysvar + { + let got_rent = Rent::default(); + let got_rent_va = 2048; + + let memory_mapping = MemoryMapping::new::( + vec![MemoryRegion { + host_addr: &got_rent as *const _ as u64, + vm_addr: got_rent_va, + len: size_of::() as u64, + vm_gap_shift: 63, + is_writable: true, + }], + &DEFAULT_CONFIG, + ) + .unwrap(); + + let src_rent = Rent { + lamports_per_byte_year: 1, + exemption_threshold: 2.0, + burn_percent: 3, + }; + let mut invoke_context = MockInvokeContext::default(); + let mut data = vec![]; + bincode::serialize_into(&mut data, &src_rent).unwrap(); + invoke_context + .sysvars + .push((sysvar::rent::id(), Some(Rc::new(data)))); + + let mut syscall = SyscallGetRentSysvar { + invoke_context: Rc::new(RefCell::new(&mut invoke_context)), + 
loader_id: &bpf_loader::id(), + }; + let mut result: Result> = Ok(0); + + syscall.call(got_rent_va, 0, 0, 0, 0, &memory_mapping, &mut result); + result.unwrap(); + assert_eq!(got_rent, src_rent); + } + } } diff --git a/programs/budget/Cargo.toml b/programs/budget/Cargo.toml index af285b44af..a1a05ba58e 100644 --- a/programs/budget/Cargo.toml +++ b/programs/budget/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-budget-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Budget program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,13 +15,13 @@ chrono = { version = "0.4.11", features = ["serde"] } log = "0.4.11" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } thiserror = "1.0" [dev-dependencies] -solana-runtime = { path = "../../runtime", version = "=1.5.19" } +solana-runtime = { path = "../../runtime", version = "=1.6.14" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/budget/src/budget_instruction.rs b/programs/budget/src/budget_instruction.rs index fc4a83ae91..e61bdfcce0 100644 --- a/programs/budget/src/budget_instruction.rs +++ b/programs/budget/src/budget_instruction.rs @@ -47,7 +47,7 @@ fn initialize_account(contract: &Pubkey, expr: BudgetExpr) -> Instruction { keys.push(AccountMeta::new(payment.to, false)); } keys.push(AccountMeta::new(*contract, false)); - Instruction::new( + Instruction::new_with_bincode( id(), &BudgetInstruction::InitializeAccount(Box::new(expr)), keys, @@ -136,7 +136,7 @@ pub fn apply_timestamp( if from != to { account_metas.push(AccountMeta::new(*to, false)); } - Instruction::new(id(), &BudgetInstruction::ApplyTimestamp(dt), account_metas) + Instruction::new_with_bincode(id(), &BudgetInstruction::ApplyTimestamp(dt), account_metas) } pub fn apply_signature(from: &Pubkey, contract: &Pubkey, 
to: &Pubkey) -> Instruction { @@ -147,7 +147,7 @@ pub fn apply_signature(from: &Pubkey, contract: &Pubkey, to: &Pubkey) -> Instruc if from != to { account_metas.push(AccountMeta::new(*to, false)); } - Instruction::new(id(), &BudgetInstruction::ApplySignature, account_metas) + Instruction::new_with_bincode(id(), &BudgetInstruction::ApplySignature, account_metas) } /// Apply account data to a contract waiting on an AccountData witness. @@ -157,7 +157,7 @@ pub fn apply_account_data(witness_pubkey: &Pubkey, contract: &Pubkey, to: &Pubke AccountMeta::new(*contract, false), AccountMeta::new(*to, false), ]; - Instruction::new(id(), &BudgetInstruction::ApplyAccountData, account_metas) + Instruction::new_with_bincode(id(), &BudgetInstruction::ApplyAccountData, account_metas) } #[cfg(test)] diff --git a/programs/budget/src/budget_processor.rs b/programs/budget/src/budget_processor.rs index df32694924..9b0dc96335 100644 --- a/programs/budget/src/budget_processor.rs +++ b/programs/budget/src/budget_processor.rs @@ -7,6 +7,7 @@ use crate::{ use chrono::prelude::{DateTime, Utc}; use log::*; use solana_sdk::{ + account::{ReadableAccount, WritableAccount}, hash::hash, instruction::InstructionError, keyed_account::{next_keyed_account, KeyedAccount}, @@ -95,7 +96,7 @@ fn apply_account_data( if let Some(ref mut expr) = budget_state.pending_budget { let key = witness_keyed_account.unsigned_key(); let program_id = witness_keyed_account.owner()?; - let actual_hash = hash(&witness_keyed_account.try_account_ref()?.data); + let actual_hash = hash(&witness_keyed_account.try_account_ref()?.data()); expr.apply_witness(&Witness::AccountData(actual_hash, program_id), key); final_payment = expr.final_payment(); } @@ -136,7 +137,8 @@ pub fn process_instruction( return Ok(()); } let existing = - BudgetState::deserialize(&contract_keyed_account.try_account_ref_mut()?.data).ok(); + BudgetState::deserialize(&contract_keyed_account.try_account_ref_mut()?.data()) + .ok(); if Some(true) == 
existing.map(|x| x.initialized) { trace!("contract already exists"); return Err(InstructionError::AccountAlreadyInitialized); @@ -145,13 +147,17 @@ pub fn process_instruction( pending_budget: Some(*expr), initialized: true, }; - budget_state.serialize(&mut contract_keyed_account.try_account_ref_mut()?.data) + budget_state.serialize( + &mut contract_keyed_account + .try_account_ref_mut()? + .data_as_mut_slice(), + ) } BudgetInstruction::ApplyTimestamp(dt) => { let witness_keyed_account = next_keyed_account(keyed_accounts_iter)?; let contract_keyed_account = next_keyed_account(keyed_accounts_iter)?; let mut budget_state = - BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data)?; + BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?; if !budget_state.is_pending() { return Ok(()); // Nothing to do here. } @@ -171,13 +177,17 @@ pub fn process_instruction( dt, )?; trace!("apply timestamp committed"); - budget_state.serialize(&mut contract_keyed_account.try_account_ref_mut()?.data) + budget_state.serialize( + &mut contract_keyed_account + .try_account_ref_mut()? + .data_as_mut_slice(), + ) } BudgetInstruction::ApplySignature => { let witness_keyed_account = next_keyed_account(keyed_accounts_iter)?; let contract_keyed_account = next_keyed_account(keyed_accounts_iter)?; let mut budget_state = - BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data)?; + BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?; if !budget_state.is_pending() { return Ok(()); // Nothing to do here. } @@ -196,13 +206,17 @@ pub fn process_instruction( next_keyed_account(keyed_accounts_iter), )?; trace!("apply signature committed"); - budget_state.serialize(&mut contract_keyed_account.try_account_ref_mut()?.data) + budget_state.serialize( + &mut contract_keyed_account + .try_account_ref_mut()? 
+ .data_as_mut_slice(), + ) } BudgetInstruction::ApplyAccountData => { let witness_keyed_account = next_keyed_account(keyed_accounts_iter)?; let contract_keyed_account = next_keyed_account(keyed_accounts_iter)?; let mut budget_state = - BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data)?; + BudgetState::deserialize(&contract_keyed_account.try_account_ref()?.data())?; if !budget_state.is_pending() { return Ok(()); // Nothing to do here. } @@ -217,7 +231,11 @@ pub fn process_instruction( next_keyed_account(keyed_accounts_iter), )?; trace!("apply account data committed"); - budget_state.serialize(&mut contract_keyed_account.try_account_ref_mut()?.data) + budget_state.serialize( + &mut contract_keyed_account + .try_account_ref_mut()? + .data_as_mut_slice(), + ) } } } @@ -229,7 +247,7 @@ mod tests { use crate::id; use solana_runtime::bank::Bank; use solana_runtime::bank_client::BankClient; - use solana_sdk::account::Account; + use solana_sdk::account::{Account, AccountSharedData}; use solana_sdk::client::SyncClient; use solana_sdk::genesis_config::create_genesis_config; use solana_sdk::hash::hash; @@ -522,13 +540,16 @@ mod tests { fn test_pay_when_account_data() { let (bank, alice_keypair) = create_bank(42); let game_pubkey = solana_sdk::pubkey::new_rand(); - let game_account = Account { + let game_account = AccountSharedData::from(Account { lamports: 1, data: vec![1, 2, 3], ..Account::default() - }; + }); bank.store_account(&game_pubkey, &game_account); - assert_eq!(bank.get_account(&game_pubkey).unwrap().data, vec![1, 2, 3]); + assert_eq!( + bank.get_account(&game_pubkey).unwrap().data(), + &vec![1, 2, 3] + ); let bank_client = BankClient::new(bank); diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index b130c42e90..c02ca0c38a 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-config-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Config 
program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,12 +14,12 @@ bincode = "1.3.1" chrono = { version = "0.4.11", features = ["serde"] } log = "0.4.11" rand_core = "0.6.2" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } [dev-dependencies] -solana-logger = { path = "../../logger", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = "=1.6.14" } [lib] crate-type = ["lib"] diff --git a/programs/config/src/config_instruction.rs b/programs/config/src/config_instruction.rs index f6ac6b5508..b1869b5d4f 100644 --- a/programs/config/src/config_instruction.rs +++ b/programs/config/src/config_instruction.rs @@ -9,7 +9,7 @@ use solana_sdk::{ fn initialize_account(config_pubkey: &Pubkey) -> Instruction { let account_metas = vec![AccountMeta::new(*config_pubkey, true)]; let account_data = (ConfigKeys { keys: vec![] }, T::default()); - Instruction::new(id(), &account_data, account_metas) + Instruction::new_with_bincode(id(), &account_data, account_metas) } /// Create a new, empty configuration account @@ -46,5 +46,5 @@ pub fn store( } } let account_data = (ConfigKeys { keys }, data); - Instruction::new(id(), &account_data, account_metas) + Instruction::new_with_bincode(id(), &account_data, account_metas) } diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index b56b89ca23..105cf8a36e 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -3,6 +3,7 @@ use crate::ConfigKeys; use bincode::deserialize; use solana_sdk::{ + account::{ReadableAccount, WritableAccount}, feature_set, ic_msg, instruction::InstructionError, keyed_account::{next_keyed_account, KeyedAccount}, @@ -29,7 +30,7 @@ pub fn process_instruction( return Err(InstructionError::InvalidAccountOwner); } - 
deserialize(&config_account.data).map_err(|err| { + deserialize(&config_account.data()).map_err(|err| { ic_msg!( invoke_context, "Unable to deserialize config account: {}", @@ -85,10 +86,7 @@ pub fn process_instruction( } // If Config account is already initialized, update signatures must match Config data if !current_data.keys.is_empty() - && current_signer_keys - .iter() - .find(|&pubkey| pubkey == signer) - .is_none() + && !current_signer_keys.iter().any(|pubkey| pubkey == signer) { ic_msg!( invoke_context, @@ -119,7 +117,10 @@ pub fn process_instruction( return Err(InstructionError::InvalidInstructionData); } - config_keyed_account.try_account_ref_mut()?.data[..data.len()].copy_from_slice(&data); + config_keyed_account + .try_account_ref_mut()? + .data_as_mut_slice()[..data.len()] + .copy_from_slice(&data); Ok(()) } @@ -130,7 +131,7 @@ mod tests { use bincode::serialized_size; use serde_derive::{Deserialize, Serialize}; use solana_sdk::{ - account::Account, + account::{Account, AccountSharedData}, keyed_account::create_keyed_is_signer_accounts, process_instruction::MockInvokeContext, signature::{Keypair, Signer}, @@ -162,7 +163,7 @@ mod tests { } } - fn create_config_account(keys: Vec<(Pubkey, bool)>) -> (Keypair, RefCell) { + fn create_config_account(keys: Vec<(Pubkey, bool)>) -> (Keypair, RefCell) { let from_pubkey = solana_sdk::pubkey::new_rand(); let config_keypair = Keypair::new(); let config_pubkey = config_keypair.pubkey(); @@ -179,11 +180,11 @@ mod tests { } => space, _ => panic!("Not a CreateAccount system instruction"), }; - let config_account = RefCell::new(Account { + let config_account = RefCell::new(AccountSharedData::from(Account { data: vec![0; space as usize], owner: id(), ..Account::default() - }); + })); let accounts = vec![(&config_pubkey, true, &config_account)]; let keyed_accounts = create_keyed_is_signer_accounts(&accounts); assert_eq!( @@ -206,7 +207,7 @@ mod tests { let (_, config_account) = create_config_account(keys); assert_eq!( 
Some(MyConfig::default()), - deserialize(get_config_data(&config_account.borrow().data).unwrap()).ok() + deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } @@ -232,7 +233,7 @@ mod tests { ); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data).unwrap()).ok() + deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } @@ -298,8 +299,8 @@ mod tests { let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config); - let signer0_account = RefCell::new(Account::default()); - let signer1_account = RefCell::new(Account::default()); + let signer0_account = RefCell::new(AccountSharedData::default()); + let signer1_account = RefCell::new(AccountSharedData::default()); let accounts = vec![ (&config_pubkey, true, &config_account), (&signer0_pubkey, true, &signer0_account), @@ -315,11 +316,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data).unwrap(); + let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data).unwrap()).ok() + deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() ); } @@ -334,10 +335,10 @@ mod tests { let my_config = MyConfig::new(42); let instruction = config_instruction::store(&config_pubkey, false, keys, &my_config); - let signer0_account = RefCell::new(Account { + let signer0_account = RefCell::new(AccountSharedData::from(Account { owner: id(), ..Account::default() - }); + })); let accounts = vec![(&signer0_pubkey, true, &signer0_account)]; let keyed_accounts = create_keyed_is_signer_accounts(&accounts); assert_eq!( @@ -356,8 +357,8 @@ mod tests { solana_logger::setup(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer1_pubkey = solana_sdk::pubkey::new_rand(); - let 
signer0_account = RefCell::new(Account::default()); - let signer1_account = RefCell::new(Account::default()); + let signer0_account = RefCell::new(AccountSharedData::default()); + let signer1_account = RefCell::new(AccountSharedData::default()); let keys = vec![(signer0_pubkey, true)]; let (config_keypair, config_account) = create_config_account(keys.clone()); let config_pubkey = config_keypair.pubkey(); @@ -405,9 +406,9 @@ mod tests { let signer0_pubkey = solana_sdk::pubkey::new_rand(); let signer1_pubkey = solana_sdk::pubkey::new_rand(); let signer2_pubkey = solana_sdk::pubkey::new_rand(); - let signer0_account = RefCell::new(Account::default()); - let signer1_account = RefCell::new(Account::default()); - let signer2_account = RefCell::new(Account::default()); + let signer0_account = RefCell::new(AccountSharedData::default()); + let signer1_account = RefCell::new(AccountSharedData::default()); + let signer2_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (pubkey, false), (signer0_pubkey, true), @@ -453,11 +454,12 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data).unwrap(); + let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data).unwrap()).unwrap() + MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + .unwrap() ); // Attempt update with incomplete signatures @@ -508,7 +510,7 @@ mod tests { solana_logger::setup(); let pubkey = solana_sdk::pubkey::new_rand(); let signer0_pubkey = solana_sdk::pubkey::new_rand(); - let signer0_account = RefCell::new(Account::default()); + let signer0_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (pubkey, false), (signer0_pubkey, true), @@ -558,11 +560,12 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = 
deserialize(&config_account.borrow().data).unwrap(); + let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data).unwrap()).unwrap() + MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + .unwrap() ); // Attempt update with incomplete signatures @@ -606,8 +609,8 @@ mod tests { let config_pubkey = solana_sdk::pubkey::new_rand(); let new_config = MyConfig::new(84); let signer0_pubkey = solana_sdk::pubkey::new_rand(); - let signer0_account = RefCell::new(Account::default()); - let config_account = RefCell::new(Account::default()); + let signer0_account = RefCell::new(AccountSharedData::default()); + let config_account = RefCell::new(AccountSharedData::default()); let keys = vec![ (from_pubkey, false), (signer0_pubkey, true), diff --git a/programs/config/src/lib.rs b/programs/config/src/lib.rs index 4521dfbf17..cdf27c61ef 100644 --- a/programs/config/src/lib.rs +++ b/programs/config/src/lib.rs @@ -5,9 +5,13 @@ pub mod date_instruction; use bincode::{deserialize, serialize, serialized_size}; use serde_derive::{Deserialize, Serialize}; -use solana_sdk::{account::Account, pubkey::Pubkey, short_vec}; +use solana_sdk::{ + account::{Account, AccountSharedData}, + pubkey::Pubkey, + short_vec, +}; -solana_sdk::declare_id!("Config1111111111111111111111111111111111111"); +pub use solana_sdk::config::program::id; pub trait ConfigState: serde::Serialize + Default { /// Maximum space that the serialized representation will require @@ -40,13 +44,13 @@ pub fn create_config_account( keys: Vec<(Pubkey, bool)>, config_data: &T, lamports: u64, -) -> Account { +) -> AccountSharedData { let mut data = serialize(&ConfigKeys { keys }).unwrap(); data.extend_from_slice(&serialize(config_data).unwrap()); - Account { + AccountSharedData::from(Account { lamports, data, owner: id(), ..Account::default() - } + }) } 
diff --git a/programs/exchange/Cargo.toml b/programs/exchange/Cargo.toml index d379dc8222..3d8ac7078c 100644 --- a/programs/exchange/Cargo.toml +++ b/programs/exchange/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-exchange-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Exchange program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,15 +14,15 @@ bincode = "1.3.1" log = "0.4.11" num-derive = { version = "0.3" } num-traits = { version = "0.2" } -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-logger = { path = "../../logger", version = "=1.5.19" } -solana-metrics = { path = "../../metrics", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = "=1.6.14" } +solana-metrics = { path = "../../metrics", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } thiserror = "1.0" [dev-dependencies] -solana-runtime = { path = "../../runtime", version = "=1.5.19" } +solana-runtime = { path = "../../runtime", version = "=1.6.14" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/exchange/src/exchange_instruction.rs b/programs/exchange/src/exchange_instruction.rs index 444fbbb02a..806c112f6a 100644 --- a/programs/exchange/src/exchange_instruction.rs +++ b/programs/exchange/src/exchange_instruction.rs @@ -59,7 +59,7 @@ pub fn account_request(owner: &Pubkey, new: &Pubkey) -> Instruction { AccountMeta::new(*owner, true), AccountMeta::new(*new, false), ]; - Instruction::new(id(), &ExchangeInstruction::AccountRequest, account_metas) + Instruction::new_with_bincode(id(), &ExchangeInstruction::AccountRequest, account_metas) } pub fn transfer_request( @@ -74,7 +74,7 @@ pub fn transfer_request( AccountMeta::new(*to, false), AccountMeta::new(*from, false), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &ExchangeInstruction::TransferRequest(token, tokens), 
account_metas, @@ -95,7 +95,7 @@ pub fn trade_request( AccountMeta::new(*trade, false), AccountMeta::new(*src_account, false), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &ExchangeInstruction::OrderRequest(OrderRequestInfo { side, @@ -112,7 +112,7 @@ pub fn order_cancellation(owner: &Pubkey, order: &Pubkey) -> Instruction { AccountMeta::new(*owner, true), AccountMeta::new(*order, false), ]; - Instruction::new(id(), &ExchangeInstruction::OrderCancellation, account_metas) + Instruction::new_with_bincode(id(), &ExchangeInstruction::OrderCancellation, account_metas) } pub fn swap_request( @@ -127,5 +127,5 @@ pub fn swap_request( AccountMeta::new(*from_trade, false), AccountMeta::new(*profit_account, false), ]; - Instruction::new(id(), &ExchangeInstruction::SwapRequest, account_metas) + Instruction::new_with_bincode(id(), &ExchangeInstruction::SwapRequest, account_metas) } diff --git a/programs/exchange/src/exchange_processor.rs b/programs/exchange/src/exchange_processor.rs index 2cf146ee72..200126c17f 100644 --- a/programs/exchange/src/exchange_processor.rs +++ b/programs/exchange/src/exchange_processor.rs @@ -8,8 +8,13 @@ use num_derive::{FromPrimitive, ToPrimitive}; use serde_derive::Serialize; use solana_metrics::inc_new_counter_info; use solana_sdk::{ - decode_error::DecodeError, instruction::InstructionError, keyed_account::KeyedAccount, - process_instruction::InvokeContext, program_utils::limited_deserialize, pubkey::Pubkey, + account::{ReadableAccount, WritableAccount}, + decode_error::DecodeError, + instruction::InstructionError, + keyed_account::KeyedAccount, + process_instruction::InvokeContext, + program_utils::limited_deserialize, + pubkey::Pubkey, }; use std::cmp; use thiserror::Error; @@ -188,7 +193,7 @@ impl ExchangeProcessor { error!("Not enough accounts"); return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data)?; + 
Self::is_account_unallocated(&keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data())?; Self::serialize( &ExchangeState::Account( TokenAccountInfo::default() @@ -197,7 +202,7 @@ impl ExchangeProcessor { ), &mut keyed_accounts[NEW_ACCOUNT_INDEX] .try_account_ref_mut()? - .data, + .data_as_mut_slice(), ) } @@ -216,13 +221,13 @@ impl ExchangeProcessor { } let mut to_account = - Self::deserialize_account(&keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data)?; + Self::deserialize_account(&keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data())?; if &faucet::id() == keyed_accounts[FROM_ACCOUNT_INDEX].unsigned_key() { to_account.tokens[token] += tokens; } else { let state: ExchangeState = - bincode::deserialize(&keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data) + bincode::deserialize(&keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data()) .map_err(Self::map_to_invalid_arg)?; match state { ExchangeState::Account(mut from_account) => { @@ -243,7 +248,7 @@ impl ExchangeProcessor { &ExchangeState::Account(from_account), &mut keyed_accounts[FROM_ACCOUNT_INDEX] .try_account_ref_mut()? - .data, + .data_as_mut_slice(), )?; } ExchangeState::Trade(mut from_trade) => { @@ -273,7 +278,7 @@ impl ExchangeProcessor { &ExchangeState::Trade(from_trade), &mut keyed_accounts[FROM_ACCOUNT_INDEX] .try_account_ref_mut()? - .data, + .data_as_mut_slice(), )?; } _ => { @@ -285,7 +290,9 @@ impl ExchangeProcessor { Self::serialize( &ExchangeState::Account(to_account), - &mut keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[TO_ACCOUNT_INDEX] + .try_account_ref_mut()? 
+ .data_as_mut_slice(), ) } @@ -302,10 +309,11 @@ impl ExchangeProcessor { return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data)?; + Self::is_account_unallocated(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; - let mut account = - Self::deserialize_account(&keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data)?; + let mut account = Self::deserialize_account( + &keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), + )?; if &account.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own account"); @@ -338,11 +346,15 @@ impl ExchangeProcessor { price: info.price, tokens_settled: 0, }), - &mut keyed_accounts[ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[ORDER_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), )?; Self::serialize( &ExchangeState::Account(account), - &mut keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[ACCOUNT_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), ) } @@ -355,7 +367,8 @@ impl ExchangeProcessor { return Err(InstructionError::InvalidArgument); } - let order = Self::deserialize_order(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data)?; + let order = + Self::deserialize_order(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; if &order.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own order"); @@ -374,7 +387,9 @@ impl ExchangeProcessor { // Turn trade order into a token account Self::serialize( &ExchangeState::Account(account), - &mut keyed_accounts[ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[ORDER_INDEX] + .try_account_ref_mut()? 
+ .data_as_mut_slice(), ) } @@ -389,11 +404,13 @@ impl ExchangeProcessor { } let mut to_order = - Self::deserialize_order(&keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data)?; + Self::deserialize_order(&keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data())?; let mut from_order = - Self::deserialize_order(&keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data)?; + Self::deserialize_order(&keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data())?; let mut profit_account = Self::deserialize_account( - &keyed_accounts[PROFIT_ACCOUNT_INDEX].try_account_ref()?.data, + &keyed_accounts[PROFIT_ACCOUNT_INDEX] + .try_account_ref()? + .data(), )?; if to_order.side != OrderSide::Ask { @@ -429,12 +446,16 @@ impl ExchangeProcessor { // Turn into token account Self::serialize( &ExchangeState::Account(Self::trade_to_token_account(&from_order)), - &mut keyed_accounts[TO_ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[TO_ORDER_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), )?; } else { Self::serialize( &ExchangeState::Trade(to_order), - &mut keyed_accounts[TO_ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[TO_ORDER_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), )?; } @@ -442,12 +463,16 @@ impl ExchangeProcessor { // Turn into token account Self::serialize( &ExchangeState::Account(Self::trade_to_token_account(&from_order)), - &mut keyed_accounts[FROM_ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[FROM_ORDER_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), )?; } else { Self::serialize( &ExchangeState::Trade(from_order), - &mut keyed_accounts[FROM_ORDER_INDEX].try_account_ref_mut()?.data, + &mut keyed_accounts[FROM_ORDER_INDEX] + .try_account_ref_mut()? + .data_as_mut_slice(), )?; } @@ -455,7 +480,7 @@ impl ExchangeProcessor { &ExchangeState::Account(profit_account), &mut keyed_accounts[PROFIT_ACCOUNT_INDEX] .try_account_ref_mut()? 
- .data, + .data_as_mut_slice(), ) } } diff --git a/programs/failure/Cargo.toml b/programs/failure/Cargo.toml index c929eb1891..85f02fccee 100644 --- a/programs/failure/Cargo.toml +++ b/programs/failure/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-failure-program" -version = "1.5.19" +version = "1.6.14" description = "Solana failure program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,10 +10,10 @@ documentation = "https://docs.rs/solana-failure-program" edition = "2018" [dependencies] -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } [dev-dependencies] -solana-runtime = { path = "../../runtime", version = "=1.5.19" } +solana-runtime = { path = "../../runtime", version = "=1.6.14" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/noop/Cargo.toml b/programs/noop/Cargo.toml index eda1b45461..92c2596710 100644 --- a/programs/noop/Cargo.toml +++ b/programs/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-noop-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Noop program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,8 +11,8 @@ edition = "2018" [dependencies] log = "0.4.11" -solana-logger = { path = "../../logger", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/ownable/Cargo.toml b/programs/ownable/Cargo.toml index 7eec425b30..e66dd16333 100644 --- a/programs/ownable/Cargo.toml +++ b/programs/ownable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-ownable" -version = "1.5.19" +version = "1.6.14" description = "ownable program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,13 +11,13 @@ edition = "2018" 
[dependencies] bincode = "1.3.1" -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } num-derive = "0.3" num-traits = "0.2" thiserror = "1.0" [dev-dependencies] -solana-runtime = { path = "../../runtime", version = "=1.5.19" } +solana-runtime = { path = "../../runtime", version = "=1.6.14" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/ownable/src/ownable_instruction.rs b/programs/ownable/src/ownable_instruction.rs index 48095e1f06..2e4cb1c5b0 100644 --- a/programs/ownable/src/ownable_instruction.rs +++ b/programs/ownable/src/ownable_instruction.rs @@ -21,7 +21,7 @@ impl DecodeError for OwnableError { fn initialize_account(account_pubkey: &Pubkey, owner_pubkey: &Pubkey) -> Instruction { let keys = vec![AccountMeta::new(*account_pubkey, false)]; - Instruction::new(crate::id(), &owner_pubkey, keys) + Instruction::new_with_bincode(crate::id(), &owner_pubkey, keys) } pub fn create_account( @@ -48,5 +48,5 @@ pub fn set_owner(account_pubkey: &Pubkey, old_pubkey: &Pubkey, new_pubkey: &Pubk AccountMeta::new(*account_pubkey, false), AccountMeta::new(*old_pubkey, true), ]; - Instruction::new(crate::id(), &new_pubkey, keys) + Instruction::new_with_bincode(crate::id(), &new_pubkey, keys) } diff --git a/programs/ownable/src/ownable_processor.rs b/programs/ownable/src/ownable_processor.rs index ecfda56da1..b5be6e6abe 100644 --- a/programs/ownable/src/ownable_processor.rs +++ b/programs/ownable/src/ownable_processor.rs @@ -3,6 +3,7 @@ use crate::ownable_instruction::OwnableError; use bincode::serialize_into; use solana_sdk::{ + account::{ReadableAccount, WritableAccount}, instruction::InstructionError, keyed_account::{next_keyed_account, KeyedAccount}, process_instruction::InvokeContext, @@ -37,7 +38,7 @@ pub fn process_instruction( let keyed_accounts_iter = &mut keyed_accounts.iter(); let account_keyed_account = &mut next_keyed_account(keyed_accounts_iter)?; let mut account_owner_pubkey: Pubkey = - 
limited_deserialize(&account_keyed_account.try_account_ref()?.data)?; + limited_deserialize(&account_keyed_account.try_account_ref()?.data())?; if account_owner_pubkey == Pubkey::default() { account_owner_pubkey = new_owner_pubkey; @@ -51,7 +52,7 @@ pub fn process_instruction( } let mut account = account_keyed_account.try_account_ref_mut()?; - serialize_into(&mut account.data[..], &account_owner_pubkey) + serialize_into(account.data_as_mut_slice(), &account_owner_pubkey) .map_err(|_| InstructionError::AccountDataTooSmall) } @@ -61,7 +62,7 @@ mod tests { use crate::ownable_instruction; use solana_runtime::{bank::Bank, bank_client::BankClient}; use solana_sdk::{ - account::Account, + account::AccountSharedData, client::SyncClient, genesis_config::create_genesis_config, message::Message, @@ -156,7 +157,7 @@ mod tests { let mut account_owner_pubkey = solana_sdk::pubkey::new_rand(); let owner_pubkey = account_owner_pubkey; let new_owner_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &system_program::id()); + let account = AccountSharedData::new_ref(1, 0, &system_program::id()); let owner_keyed_account = KeyedAccount::new(&owner_pubkey, false, &account); // <-- Attack! Setting owner without the original owner's signature. let err = set_owner( &mut account_owner_pubkey, @@ -171,7 +172,7 @@ mod tests { fn test_ownable_incorrect_owner() { let mut account_owner_pubkey = solana_sdk::pubkey::new_rand(); let new_owner_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &system_program::id()); + let account = AccountSharedData::new_ref(1, 0, &system_program::id()); let mallory_pubkey = solana_sdk::pubkey::new_rand(); // <-- Attack! 
Signing with wrong pubkey let owner_keyed_account = KeyedAccount::new(&mallory_pubkey, true, &account); let err = set_owner( diff --git a/programs/secp256k1/Cargo.toml b/programs/secp256k1/Cargo.toml index aec79332bb..6007cdad0e 100644 --- a/programs/secp256k1/Cargo.toml +++ b/programs/secp256k1/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-secp256k1-program" description = "Solana Secp256k1 program" -version = "1.5.19" +version = "1.6.14" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-secp256k1-program" repository = "https://github.com/solana-labs/solana" @@ -10,13 +10,13 @@ license = "Apache-2.0" edition = "2018" [dependencies] -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } libsecp256k1 = "0.3.5" sha3 = "0.9.1" digest = "0.9.0" bincode = "1.3.1" rand = "0.7.0" -solana-logger = { path = "../../logger", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = "=1.6.14" } [lib] crate-type = ["lib"] diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 32596c6bb6..d06a9eb861 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-stake-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Stake program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,18 +14,18 @@ bincode = "1.3.1" log = "0.4.11" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.5.19" } -solana-metrics = { path = "../../metrics", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } -solana-vote-program = { path = "../vote", version = "=1.5.19" } -solana-config-program = { path = "../config", version = "=1.5.19" } 
+solana-frozen-abi = { path = "../../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.6.14" } +solana-metrics = { path = "../../metrics", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } +solana-vote-program = { path = "../vote", version = "=1.6.14" } +solana-config-program = { path = "../config", version = "=1.6.14" } thiserror = "1.0" [dev-dependencies] -solana-logger = { path = "../../logger", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = "=1.6.14" } [build-dependencies] rustc_version = "0.2" diff --git a/programs/stake/src/config.rs b/programs/stake/src/config.rs index 35d2a53db8..9ca3fc0f66 100644 --- a/programs/stake/src/config.rs +++ b/programs/stake/src/config.rs @@ -4,12 +4,14 @@ use bincode::{deserialize, serialized_size}; use serde_derive::{Deserialize, Serialize}; use solana_config_program::{create_config_account, get_config_data, ConfigState}; use solana_sdk::{ - account::Account, genesis_config::GenesisConfig, instruction::InstructionError, + account::{AccountSharedData, ReadableAccount}, + genesis_config::GenesisConfig, + instruction::InstructionError, keyed_account::KeyedAccount, }; // stake config ID -solana_sdk::declare_id!("StakeConfig11111111111111111111111111111111"); +pub use solana_sdk::stake::config::{check_id, id}; // means that no more than RATE of current effective stake may be added or subtracted per // epoch @@ -25,8 +27,8 @@ pub struct Config { } impl Config { - pub fn from(account: &Account) -> Option { - get_config_data(&account.data) + pub fn from(account: &T) -> Option { + get_config_data(&account.data()) .ok() .and_then(|data| deserialize(data).ok()) } @@ -49,7 +51,7 @@ impl ConfigState for Config { pub fn add_genesis_account(genesis_config: &mut GenesisConfig) -> u64 { let mut account = create_config_account(vec![], &Config::default(), 0); - let lamports = 
genesis_config.rent.minimum_balance(account.data.len()); + let lamports = genesis_config.rent.minimum_balance(account.data().len()); account.lamports = lamports.max(1); @@ -58,7 +60,7 @@ pub fn add_genesis_account(genesis_config: &mut GenesisConfig) -> u64 { lamports } -pub fn create_account(lamports: u64, config: &Config) -> Account { +pub fn create_account(lamports: u64, config: &Config) -> AccountSharedData { create_config_account(vec![], config, lamports) } diff --git a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index 23438bf8c8..db7227f2bf 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -6,7 +6,7 @@ pub mod config; pub mod stake_instruction; pub mod stake_state; -solana_sdk::declare_id!("Stake11111111111111111111111111111111111111"); +pub use solana_sdk::stake::program::{check_id, id}; pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig) -> u64 { config::add_genesis_account(genesis_config) diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index da747cd848..39d6bfb6d2 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -11,7 +11,7 @@ use solana_sdk::{ feature_set, instruction::{AccountMeta, Instruction, InstructionError}, keyed_account::{from_keyed_account, get_signers, next_keyed_account, KeyedAccount}, - process_instruction::InvokeContext, + process_instruction::{get_sysvar, InvokeContext}, program_utils::limited_deserialize, pubkey::Pubkey, system_instruction, @@ -126,9 +126,12 @@ pub enum StakeInstruction { /// Set stake lockup /// + /// If a lockup is not active, the withdraw authority may set a new lockup + /// If a lockup is active, the lockup custodian may update the lockup parameters + /// /// # Account references /// 0. [WRITE] Initialized stake account - /// 1. [SIGNER] Lockup authority + /// 1. [SIGNER] Lockup authority or withdraw authority SetLockup(LockupArgs), /// Merge two stake accounts. 
@@ -183,8 +186,8 @@ pub struct AuthorizeWithSeedArgs { pub authority_owner: Pubkey, } -fn initialize(stake_pubkey: &Pubkey, authorized: &Authorized, lockup: &Lockup) -> Instruction { - Instruction::new( +pub fn initialize(stake_pubkey: &Pubkey, authorized: &Authorized, lockup: &Lockup) -> Instruction { + Instruction::new_with_bincode( id(), &StakeInstruction::Initialize(*authorized, *lockup), vec![ @@ -248,7 +251,7 @@ fn _split( AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new(id(), &StakeInstruction::Split(lamports), account_metas) + Instruction::new_with_bincode(id(), &StakeInstruction::Split(lamports), account_metas) } pub fn split( @@ -258,13 +261,8 @@ pub fn split( split_stake_pubkey: &Pubkey, ) -> Vec { vec![ - system_instruction::create_account( - authorized_pubkey, // Sending 0, so any signer will suffice - split_stake_pubkey, - 0, - std::mem::size_of::() as u64, - &id(), - ), + system_instruction::allocate(split_stake_pubkey, std::mem::size_of::() as u64), + system_instruction::assign(split_stake_pubkey, &id()), _split( stake_pubkey, authorized_pubkey, @@ -283,12 +281,10 @@ pub fn split_with_seed( seed: &str, // seed ) -> Vec { vec![ - system_instruction::create_account_with_seed( - authorized_pubkey, // Sending 0, so any signer will suffice + system_instruction::allocate_with_seed( split_stake_pubkey, base, seed, - 0, std::mem::size_of::() as u64, &id(), ), @@ -314,7 +310,7 @@ pub fn merge( AccountMeta::new_readonly(*authorized_pubkey, true), ]; - vec![Instruction::new( + vec![Instruction::new_with_bincode( id(), &StakeInstruction::Merge, account_metas, @@ -382,7 +378,7 @@ pub fn authorize( account_metas.push(AccountMeta::new_readonly(*custodian_pubkey, true)); } - Instruction::new( + Instruction::new_with_bincode( id(), &StakeInstruction::Authorize(*new_authorized_pubkey, stake_authorize), account_metas, @@ -415,7 +411,7 @@ pub fn authorize_with_seed( authority_owner: *authority_owner, }; - Instruction::new( + 
Instruction::new_with_bincode( id(), &StakeInstruction::AuthorizeWithSeed(args), account_metas, @@ -435,7 +431,7 @@ pub fn delegate_stake( AccountMeta::new_readonly(crate::config::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new(id(), &StakeInstruction::DelegateStake, account_metas) + Instruction::new_with_bincode(id(), &StakeInstruction::DelegateStake, account_metas) } pub fn withdraw( @@ -457,7 +453,7 @@ pub fn withdraw( account_metas.push(AccountMeta::new_readonly(*custodian_pubkey, true)); } - Instruction::new(id(), &StakeInstruction::Withdraw(lamports), account_metas) + Instruction::new_with_bincode(id(), &StakeInstruction::Withdraw(lamports), account_metas) } pub fn deactivate_stake(stake_pubkey: &Pubkey, authorized_pubkey: &Pubkey) -> Instruction { @@ -466,7 +462,7 @@ pub fn deactivate_stake(stake_pubkey: &Pubkey, authorized_pubkey: &Pubkey) -> In AccountMeta::new_readonly(sysvar::clock::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new(id(), &StakeInstruction::Deactivate, account_metas) + Instruction::new_with_bincode(id(), &StakeInstruction::Deactivate, account_metas) } pub fn set_lockup( @@ -478,7 +474,7 @@ pub fn set_lockup( AccountMeta::new(*stake_pubkey, false), AccountMeta::new_readonly(*custodian_pubkey, true), ]; - Instruction::new(id(), &StakeInstruction::SetLockup(*lockup), account_metas) + Instruction::new_with_bincode(id(), &StakeInstruction::SetLockup(*lockup), account_metas) } pub fn process_instruction( @@ -572,6 +568,8 @@ pub fn process_instruction( } } StakeInstruction::DelegateStake => { + let can_reverse_deactivation = + invoke_context.is_feature_active(&feature_set::stake_program_v4::id()); let vote = next_keyed_account(keyed_accounts)?; me.delegate( @@ -580,6 +578,7 @@ pub fn process_instruction( &from_keyed_account::(next_keyed_account(keyed_accounts)?)?, &config::from_keyed_account(next_keyed_account(keyed_accounts)?)?, &signers, + can_reverse_deactivation, 
) } StakeInstruction::Split(lamports) => { @@ -588,12 +587,15 @@ pub fn process_instruction( } StakeInstruction::Merge => { let source_stake = &next_keyed_account(keyed_accounts)?; + let can_merge_expired_lockups = + invoke_context.is_feature_active(&feature_set::stake_program_v4::id()); me.merge( invoke_context, source_stake, &from_keyed_account::(next_keyed_account(keyed_accounts)?)?, &from_keyed_account::(next_keyed_account(keyed_accounts)?)?, &signers, + can_merge_expired_lockups, ) } @@ -606,6 +608,7 @@ pub fn process_instruction( &from_keyed_account::(next_keyed_account(keyed_accounts)?)?, next_keyed_account(keyed_accounts)?, keyed_accounts.next(), + invoke_context.is_feature_active(&feature_set::stake_program_v4::id()), ) } StakeInstruction::Deactivate => me.deactivate( @@ -613,7 +616,14 @@ pub fn process_instruction( &signers, ), - StakeInstruction::SetLockup(lockup) => me.set_lockup(&lockup, &signers), + StakeInstruction::SetLockup(lockup) => { + let clock = if invoke_context.is_feature_active(&feature_set::stake_program_v4::id()) { + Some(get_sysvar::(invoke_context, &sysvar::clock::id())?) 
+ } else { + None + }; + me.set_lockup(&lockup, &signers, clock.as_ref()) + } } } @@ -622,23 +632,23 @@ mod tests { use super::*; use bincode::serialize; use solana_sdk::{ - account::{self, Account}, - process_instruction::MockInvokeContext, + account::{self, Account, AccountSharedData}, + process_instruction::{mock_set_sysvar, MockInvokeContext}, rent::Rent, sysvar::stake_history::StakeHistory, }; use std::cell::RefCell; use std::str::FromStr; - fn create_default_account() -> RefCell { - RefCell::new(Account::default()) + fn create_default_account() -> RefCell { + RefCell::new(AccountSharedData::default()) } - fn create_default_stake_account() -> RefCell { - RefCell::new(Account { + fn create_default_stake_account() -> RefCell { + RefCell::new(AccountSharedData::from(Account { owner: id(), ..Account::default() - }) + })) } fn invalid_stake_state_pubkey() -> Pubkey { @@ -663,35 +673,37 @@ mod tests { .iter() .map(|meta| { RefCell::new(if sysvar::clock::check_id(&meta.pubkey) { - account::create_account_for_test(&sysvar::clock::Clock::default()) + account::create_account_shared_data_for_test(&sysvar::clock::Clock::default()) } else if sysvar::rewards::check_id(&meta.pubkey) { - account::create_account_for_test(&sysvar::rewards::Rewards::new(0.0)) + account::create_account_shared_data_for_test(&sysvar::rewards::Rewards::new( + 0.0, + )) } else if sysvar::stake_history::check_id(&meta.pubkey) { - account::create_account_for_test(&StakeHistory::default()) + account::create_account_shared_data_for_test(&StakeHistory::default()) } else if config::check_id(&meta.pubkey) { config::create_account(0, &config::Config::default()) } else if sysvar::rent::check_id(&meta.pubkey) { - account::create_account_for_test(&Rent::default()) + account::create_account_shared_data_for_test(&Rent::default()) } else if meta.pubkey == invalid_stake_state_pubkey() { - Account { + AccountSharedData::from(Account { owner: id(), ..Account::default() - } + }) } else if meta.pubkey == 
invalid_vote_state_pubkey() { - Account { + AccountSharedData::from(Account { owner: solana_vote_program::id(), ..Account::default() - } + }) } else if meta.pubkey == spoofed_stake_state_pubkey() { - Account { + AccountSharedData::from(Account { owner: spoofed_stake_program_id(), ..Account::default() - } + }) } else { - Account { + AccountSharedData::from(Account { owner: id(), ..Account::default() - } + }) }) }) .collect(); @@ -703,11 +715,19 @@ mod tests { .zip(accounts.iter()) .map(|(meta, account)| KeyedAccount::new(&meta.pubkey, meta.is_signer, account)) .collect(); + + let mut invoke_context = MockInvokeContext::default(); + mock_set_sysvar( + &mut invoke_context, + sysvar::clock::id(), + sysvar::clock::Clock::default(), + ) + .unwrap(); super::process_instruction( &Pubkey::default(), &keyed_accounts, &instruction.data, - &mut MockInvokeContext::default(), + &mut invoke_context, ) } } @@ -739,7 +759,7 @@ mod tests { &Pubkey::default(), 100, &invalid_stake_state_pubkey(), - )[1] + )[2] ), Err(InstructionError::InvalidAccountData), ); @@ -825,7 +845,7 @@ mod tests { &Pubkey::default(), 100, &Pubkey::default(), - )[1] + )[2] ), Err(InstructionError::InvalidAccountOwner), ); @@ -836,7 +856,7 @@ mod tests { &Pubkey::default(), 100, &spoofed_stake_state_pubkey(), - )[1] + )[2] ), Err(InstructionError::IncorrectProgramId), ); @@ -973,7 +993,9 @@ mod tests { KeyedAccount::new( &sysvar::rent::id(), false, - &RefCell::new(account::create_account_for_test(&Rent::default())) + &RefCell::new(account::create_account_shared_data_for_test( + &Rent::default() + )) ) ], &serialize(&StakeInstruction::Initialize( @@ -1028,14 +1050,14 @@ mod tests { KeyedAccount::new( &sysvar::clock::id(), false, - &RefCell::new(account::create_account_for_test( - &sysvar::clock::Clock::default() + &RefCell::new(account::create_account_shared_data_for_test( + &sysvar::clock::Clock::default(), )) ), KeyedAccount::new( &sysvar::stake_history::id(), false, - 
&RefCell::new(account::create_account_for_test( + &RefCell::new(account::create_account_shared_data_for_test( &sysvar::stake_history::StakeHistory::default(), )) ), @@ -1061,14 +1083,16 @@ mod tests { KeyedAccount::new( &sysvar::rewards::id(), false, - &RefCell::new(account::create_account_for_test( + &RefCell::new(account::create_account_shared_data_for_test( &sysvar::rewards::Rewards::new(0.0), )) ), KeyedAccount::new( &sysvar::stake_history::id(), false, - &RefCell::new(account::create_account_for_test(&StakeHistory::default())) + &RefCell::new(account::create_account_shared_data_for_test( + &StakeHistory::default(), + )) ), ], &serialize(&StakeInstruction::Withdraw(42)).unwrap(), @@ -1101,7 +1125,7 @@ mod tests { KeyedAccount::new( &sysvar::rewards::id(), false, - &RefCell::new(account::create_account_for_test( + &RefCell::new(account::create_account_shared_data_for_test( &sysvar::rewards::Rewards::new(0.0), )) ), diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index d9ff8356a2..955027f831 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -10,7 +10,7 @@ use crate::{ }; use serde_derive::{Deserialize, Serialize}; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, account_utils::{State, StateMut}, clock::{Clock, Epoch, UnixTimestamp}, ic_msg, @@ -25,12 +25,12 @@ use solana_vote_program::vote_state::{VoteState, VoteStateVersions}; use std::{collections::HashSet, convert::TryFrom}; /// Minimal amount for delegate to create stake account. = 10k -// Removed requirement of having 10k to create stake account. 14.04.2021, keeping constant, to allow more smooth future changes. -pub const MIN_DELEGATE_STAKE_AMOUNT: u64 = solana_sdk::native_token::sol_to_lamports_u64(0); +// Removed requirement of having 10k to create stake account. +// 14.04.2021, keeping constant, to allow more smooth future changes. 
+pub const MIN_DELEGATE_STAKE_AMOUNT: u64 = 0; /// Amount of stake to be in majority = 1M -pub const MIN_STAKERS_TO_BE_MAJORITY: u64 = - solana_sdk::native_token::sol_to_lamports_u64(1_000_000); +pub const MIN_STAKERS_TO_BE_MAJORITY: u64 = 1_000_000 * solana_sdk::native_token::LAMPORTS_PER_VLX; /// Number of stakers with lamports more than 1M, to start filtering = 19 pub const NUM_MAJOR_STAKERS_FOR_FILTERING: usize = 19; @@ -87,11 +87,11 @@ impl StakeState { } // utility function, used by Stakes, tests - pub fn from(account: &Account) -> Option { + pub fn from>(account: &T) -> Option { account.state().ok() } - pub fn stake_from(account: &Account) -> Option { + pub fn stake_from>(account: &T) -> Option { Self::from(account).and_then(|state: Self| state.stake()) } pub fn stake(&self) -> Option { @@ -101,7 +101,7 @@ impl StakeState { } } - pub fn delegation_from(account: &Account) -> Option { + pub fn delegation_from(account: &AccountSharedData) -> Option { Self::from(account).and_then(|state: Self| state.delegation()) } pub fn delegation(&self) -> Option { @@ -111,7 +111,7 @@ impl StakeState { } } - pub fn authorized_from(account: &Account) -> Option { + pub fn authorized_from(account: &AccountSharedData) -> Option { Self::from(account).and_then(|state: Self| state.authorized()) } @@ -123,7 +123,7 @@ impl StakeState { } } - pub fn lockup_from(account: &Account) -> Option { + pub fn lockup_from>(account: &T) -> Option { Self::from(account).and_then(|state: Self| state.lockup()) } @@ -131,7 +131,7 @@ impl StakeState { self.meta().map(|meta| meta.lockup) } - pub fn meta_from(account: &Account) -> Option { + pub fn meta_from(account: &AccountSharedData) -> Option { Self::from(account).and_then(|state: Self| state.meta()) } @@ -190,9 +190,28 @@ impl Meta { &mut self, lockup: &LockupArgs, signers: &HashSet, + clock: Option<&Clock>, ) -> Result<(), InstructionError> { - if !signers.contains(&self.lockup.custodian) { - return Err(InstructionError::MissingRequiredSignature); 
+ match clock { + None => { + // pre-stake_program_v4 behavior: custodian can set lockups at any time + if !signers.contains(&self.lockup.custodian) { + return Err(InstructionError::MissingRequiredSignature); + } + } + Some(clock) => { + // post-stake_program_v4 behavior: + // * custodian can update the lockup while in force + // * withdraw authority can set a new lockup + // + if self.lockup.is_in_force(clock, None) { + if !signers.contains(&self.lockup.custodian) { + return Err(InstructionError::MissingRequiredSignature); + } + } else if !signers.contains(&self.authorized.withdrawer) { + return Err(InstructionError::MissingRequiredSignature); + } + } } if let Some(unix_timestamp) = lockup.unix_timestamp { self.lockup.unix_timestamp = unix_timestamp; @@ -766,13 +785,28 @@ impl Stake { clock: &Clock, stake_history: &StakeHistory, config: &Config, + can_reverse_deactivation: bool, ) -> Result<(), StakeError> { - // can't redelegate if stake is active. either the stake - // is freshly activated or has fully de-activated. redelegation - // implies re-activation + // If stake is currently active: if self.stake(clock.epoch, Some(stake_history), true) != 0 { - return Err(StakeError::TooSoonToRedelegate); + // If pubkey of new voter is the same as current, + // and we are scheduled to start deactivating this epoch, + // we rescind deactivation + if self.delegation.voter_pubkey == *voter_pubkey + && clock.epoch == self.delegation.deactivation_epoch + && can_reverse_deactivation + { + self.delegation.deactivation_epoch = std::u64::MAX; + return Ok(()); + } else { + // can't redelegate to another pubkey if stake is active. + return Err(StakeError::TooSoonToRedelegate); + } } + // Either the stake is freshly activated, is active but has been + // deactivated this epoch, or has fully de-activated. 
+ // Redelegation implies either re-activation or un-deactivation + self.delegation.stake = stake_lamports; self.delegation.activation_epoch = clock.epoch; self.delegation.deactivation_epoch = std::u64::MAX; @@ -863,12 +897,14 @@ pub trait StakeAccount { stake_history: &StakeHistory, config: &Config, signers: &HashSet, + can_reverse_deactivation: bool, ) -> Result<(), InstructionError>; fn deactivate(&self, clock: &Clock, signers: &HashSet) -> Result<(), InstructionError>; fn set_lockup( &self, lockup: &LockupArgs, signers: &HashSet, + clock: Option<&Clock>, ) -> Result<(), InstructionError>; fn split( &self, @@ -883,6 +919,7 @@ pub trait StakeAccount { clock: &Clock, stake_history: &StakeHistory, signers: &HashSet, + can_merge_expired_lockups: bool, ) -> Result<(), InstructionError>; fn withdraw( &self, @@ -892,6 +929,7 @@ pub trait StakeAccount { stake_history: &StakeHistory, withdraw_authority: &KeyedAccount, custodian: Option<&KeyedAccount>, + prevent_withdraw_to_zero: bool, ) -> Result<(), InstructionError>; } @@ -999,6 +1037,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { stake_history: &StakeHistory, config: &Config, signers: &HashSet, + can_reverse_deactivation: bool, ) -> Result<(), InstructionError> { if vote_account.owner()? != solana_vote_program::id() { return Err(InstructionError::IncorrectProgramId); @@ -1025,6 +1064,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { clock, stake_history, config, + can_reverse_deactivation, )?; self.set_state(&StakeState::Stake(meta, stake)) } @@ -1045,14 +1085,15 @@ impl<'a> StakeAccount for KeyedAccount<'a> { &self, lockup: &LockupArgs, signers: &HashSet, + clock: Option<&Clock>, ) -> Result<(), InstructionError> { match self.state()? 
{ StakeState::Initialized(mut meta) => { - meta.set_lockup(lockup, signers)?; + meta.set_lockup(lockup, signers, clock)?; self.set_state(&StakeState::Initialized(meta)) } StakeState::Stake(mut meta, stake) => { - meta.set_lockup(lockup, signers)?; + meta.set_lockup(lockup, signers, clock)?; self.set_state(&StakeState::Stake(meta, stake)) } _ => Err(InstructionError::InvalidAccountData), @@ -1068,6 +1109,9 @@ impl<'a> StakeAccount for KeyedAccount<'a> { if split.owner()? != id() { return Err(InstructionError::IncorrectProgramId); } + if split.data_len()? != std::mem::size_of::() { + return Err(InstructionError::InvalidAccountData); + } if let StakeState::Uninitialized = split.state()? { // verify enough account lamports @@ -1083,17 +1127,19 @@ impl<'a> StakeAccount for KeyedAccount<'a> { self.data_len()? as u64, split.data_len()? as u64, ); + + // TODO(velas): check if we can revert updates below to solana upstream let self_min_balance = meta.rent_exempt_reserve + MIN_DELEGATE_STAKE_AMOUNT; let split_min_balance = split_rent_exempt_reserve + MIN_DELEGATE_STAKE_AMOUNT; let split_needs = split_min_balance.saturating_sub(split.lamports()?); let retain_self = lamports != self.lamports()?; - // enough lamports for rent in new stake - if lamports < split_needs - // if not full withdrawal - || (retain_self - // verify more than MIN_DELEGATE_STAKE_AMOUNT stake left in previous stake - && self.lamports()? < lamports + self_min_balance) + // verify enough lamports for rent and more than 0 stake in new split account + if lamports <= split_needs + // if not full withdrawal + || (retain_self + // verify more than MIN_DELEGATE_STAKE_AMOUNT stake left in previous stake + && checked_add(lamports, self_min_balance)? >= self.lamports()?) { return Err(InstructionError::InsufficientFunds); } @@ -1143,17 +1189,19 @@ impl<'a> StakeAccount for KeyedAccount<'a> { self.data_len()? as u64, split.data_len()? 
as u64, ); + + // TODO(velas): check if we can revert updates below to solana upstream let self_min_balance = meta.rent_exempt_reserve + MIN_DELEGATE_STAKE_AMOUNT; let split_min_balance = split_rent_exempt_reserve + MIN_DELEGATE_STAKE_AMOUNT; let split_needs = split_min_balance.saturating_sub(split.lamports()?); let retain_self = lamports != self.lamports()?; - // enough lamports for rent in new stake - if lamports < split_needs - // if not full withdrawal - || (retain_self - // verify more than MIN_DELEGATE_STAKE_AMOUNT stake left in previous stake - && self.lamports()? < lamports + self_min_balance) + // enough lamports for rent and more than 0 stake in new split account + if lamports <= split_needs + // if not full withdrawal + || (retain_self + // verify more than 0 stake left in previous stake + && checked_add(lamports, self_min_balance)? >= self.lamports()?) { return Err(InstructionError::InsufficientFunds); } @@ -1190,6 +1238,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { clock: &Clock, stake_history: &StakeHistory, signers: &HashSet, + can_merge_expired_lockups: bool, ) -> Result<(), InstructionError> { // Ensure source isn't spoofed if source_account.owner()? != id() { @@ -1212,8 +1261,16 @@ impl<'a> StakeAccount for KeyedAccount<'a> { let source_merge_kind = MergeKind::get_if_mergeable(invoke_context, source_account, clock, stake_history)?; + let clock = if can_merge_expired_lockups { + Some(clock) + } else { + None + }; + ic_msg!(invoke_context, "Merging stake accounts"); - if let Some(merged_state) = stake_merge_kind.merge(invoke_context, source_merge_kind)? { + if let Some(merged_state) = + stake_merge_kind.merge(invoke_context, source_merge_kind, clock)? 
+ { self.set_state(&merged_state)?; } @@ -1235,6 +1292,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { stake_history: &StakeHistory, withdraw_authority: &KeyedAccount, custodian: Option<&KeyedAccount>, + prevent_withdraw_to_zero: bool, ) -> Result<(), InstructionError> { let mut signers = HashSet::new(); let withdraw_authority_pubkey = withdraw_authority @@ -1264,6 +1322,11 @@ impl<'a> StakeAccount for KeyedAccount<'a> { StakeState::Initialized(meta) => { meta.authorized .check(&signers, StakeAuthorize::Withdrawer)?; + let reserve = if prevent_withdraw_to_zero { + checked_add(meta.rent_exempt_reserve, 1)? // stake accounts must have a balance > rent_exempt_reserve + } else { + meta.rent_exempt_reserve + }; ( meta.lockup, @@ -1386,13 +1449,24 @@ impl MergeKind { invoke_context: &dyn InvokeContext, stake: &Meta, source: &Meta, + clock: Option<&Clock>, ) -> Result<(), InstructionError> { + let can_merge_lockups = match clock { + // pre-v4 behavior. lockups must match, even when expired + None => stake.lockup == source.lockup, + // v4 behavior. lockups may mismatch so long as both have expired + Some(clock) => { + stake.lockup == source.lockup + || (!stake.lockup.is_in_force(clock, None) + && !source.lockup.is_in_force(clock, None)) + } + }; // `rent_exempt_reserve` has no bearing on the mergeability of accounts, // as the source account will be culled by runtime once the operation // succeeds. 
Considering it here would needlessly prevent merging stake // accounts with differing data lengths, which already exist in the wild // due to an SDK bug - if stake.authorized == source.authorized && stake.lockup == source.lockup { + if stake.authorized == source.authorized && can_merge_lockups { Ok(()) } else { ic_msg!(invoke_context, "Unable to merge due to metadata mismatch"); @@ -1445,8 +1519,9 @@ impl MergeKind { self, invoke_context: &dyn InvokeContext, source: Self, + clock: Option<&Clock>, ) -> Result, InstructionError> { - Self::metas_can_merge(invoke_context, self.meta(), source.meta())?; + Self::metas_can_merge(invoke_context, self.meta(), source.meta(), clock)?; self.active_stake() .zip(source.active_stake()) .map(|(stake, source)| Self::active_stakes_can_merge(invoke_context, stake, source)) @@ -1488,8 +1563,8 @@ impl MergeKind { // returns a tuple of (stakers_reward,voters_reward) pub fn redeem_rewards( rewarded_epoch: Epoch, - stake_account: &mut Account, - vote_account: &mut Account, + stake_account: &mut AccountSharedData, + vote_account: &mut AccountSharedData, point_value: &PointValue, stake_history: Option<&StakeHistory>, inflation_point_calc_tracer: &mut Option, @@ -1537,8 +1612,8 @@ pub fn redeem_rewards( // utility function, used by runtime pub fn calculate_points( - stake_account: &Account, - vote_account: &Account, + stake_account: &AccountSharedData, + vote_account: &AccountSharedData, stake_history: Option<&StakeHistory>, fix_stake_deactivate: bool, ) -> Result { @@ -1573,12 +1648,12 @@ fn calculate_split_rent_exempt_reserve( pub type RewriteStakeStatus = (&'static str, (u64, u64), (u64, u64)); pub fn rewrite_stakes( - stake_account: &mut Account, + stake_account: &mut AccountSharedData, rent: &Rent, ) -> Result { match stake_account.state()? 
{ StakeState::Initialized(mut meta) => { - let meta_status = meta.rewrite_rent_exempt_reserve(rent, stake_account.data.len()); + let meta_status = meta.rewrite_rent_exempt_reserve(rent, stake_account.data().len()); if meta_status.is_none() { return Err(InstructionError::InvalidAccountData); @@ -1588,7 +1663,7 @@ pub fn rewrite_stakes( Ok(("initialized", meta_status.unwrap_or_default(), (0, 0))) } StakeState::Stake(mut meta, mut stake) => { - let meta_status = meta.rewrite_rent_exempt_reserve(rent, stake_account.data.len()); + let meta_status = meta.rewrite_rent_exempt_reserve(rent, stake_account.data().len()); let stake_status = stake .delegation .rewrite_stake(stake_account.lamports, meta.rent_exempt_reserve); @@ -1643,10 +1718,11 @@ pub fn create_lockup_stake_account( lockup: &Lockup, rent: &Rent, lamports: u64, -) -> Account { - let mut stake_account = Account::new(lamports, std::mem::size_of::(), &id()); +) -> AccountSharedData { + let mut stake_account = + AccountSharedData::new(lamports, std::mem::size_of::(), &id()); - let rent_exempt_reserve = rent.minimum_balance(stake_account.data.len()); + let rent_exempt_reserve = rent.minimum_balance(stake_account.data().len()); assert!( lamports >= rent_exempt_reserve, "lamports: {} is less than rent_exempt_reserve {}", @@ -1669,10 +1745,10 @@ pub fn create_lockup_stake_account( pub fn create_account( authorized: &Pubkey, voter_pubkey: &Pubkey, - vote_account: &Account, + vote_account: &AccountSharedData, rent: &Rent, lamports: u64, -) -> Account { +) -> AccountSharedData { do_create_account( authorized, voter_pubkey, @@ -1687,11 +1763,11 @@ pub fn create_account( pub fn create_account_with_activation_epoch( authorized: &Pubkey, voter_pubkey: &Pubkey, - vote_account: &Account, + vote_account: &AccountSharedData, rent: &Rent, lamports: u64, activation_epoch: Epoch, -) -> Account { +) -> AccountSharedData { do_create_account( authorized, voter_pubkey, @@ -1705,16 +1781,17 @@ pub fn create_account_with_activation_epoch( 
fn do_create_account( authorized: &Pubkey, voter_pubkey: &Pubkey, - vote_account: &Account, + vote_account: &AccountSharedData, rent: &Rent, lamports: u64, activation_epoch: Epoch, -) -> Account { - let mut stake_account = Account::new(lamports, std::mem::size_of::(), &id()); +) -> AccountSharedData { + let mut stake_account = + AccountSharedData::new(lamports, std::mem::size_of::(), &id()); let vote_state = VoteState::from(vote_account).expect("vote_state"); - let rent_exempt_reserve = rent.minimum_balance(stake_account.data.len()); + let rent_exempt_reserve = rent.minimum_balance(stake_account.data().len()); stake_account .set_state(&StakeState::Stake( @@ -1741,8 +1818,8 @@ mod tests { use super::*; use crate::id; use solana_sdk::{ - account::Account, native_token, process_instruction::MockInvokeContext, pubkey::Pubkey, - system_program, + account::AccountSharedData, native_token, process_instruction::MockInvokeContext, + pubkey::Pubkey, system_program, }; use solana_vote_program::vote_state; use std::{cell::RefCell, iter::FromIterator}; @@ -1896,7 +1973,7 @@ mod tests { #[test] fn test_stake_state_stake_from_fail() { - let mut stake_account = Account::new(0, std::mem::size_of::(), &id()); + let mut stake_account = AccountSharedData::new(0, std::mem::size_of::(), &id()); stake_account .set_state(&StakeState::default()) @@ -1933,10 +2010,16 @@ mod tests { }; let vote_pubkey = solana_sdk::pubkey::new_rand(); + let vote_pubkey_2 = solana_sdk::pubkey::new_rand(); + let mut vote_state = VoteState::default(); for i in 0..1000 { vote_state.process_slot_vote_unchecked(i); } + let mut vote_state_2 = VoteState::default(); + for i in 0..1000 { + vote_state_2.process_slot_vote_unchecked(i); + } let vote_account = RefCell::new(vote_state::create_account( &vote_pubkey, @@ -1944,15 +2027,27 @@ mod tests { 0, 100, )); + let vote_account_2 = RefCell::new(vote_state::create_account( + &vote_pubkey_2, + &solana_sdk::pubkey::new_rand(), + 0, + 100, + )); let vote_keyed_account = 
KeyedAccount::new(&vote_pubkey, false, &vote_account); + let vote_keyed_account_2 = KeyedAccount::new(&vote_pubkey_2, false, &vote_account_2); + let vote_state_credits = vote_state.credits(); vote_keyed_account .set_state(&VoteStateVersions::new_current(vote_state)) .unwrap(); + let vote_state_credits_2 = vote_state_2.credits(); + vote_keyed_account_2 + .set_state(&VoteStateVersions::new_current(vote_state_2)) + .unwrap(); let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta { authorized: Authorized { @@ -1977,6 +2072,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ), Err(InstructionError::MissingRequiredSignature) ); @@ -1991,6 +2087,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ) .is_ok()); @@ -2012,14 +2109,63 @@ mod tests { clock.epoch += 1; - // verify that delegate fails if stake is still active + // verify that delegate fails as stake is active and not deactivating + assert_eq!( + stake_keyed_account.delegate( + &vote_keyed_account, + &clock, + &StakeHistory::default(), + &Config::default(), + &signers, + true + ), + Err(StakeError::TooSoonToRedelegate.into()) + ); + + // deactivate + stake_keyed_account.deactivate(&clock, &signers).unwrap(); + + // verify that delegate to a different vote account fails + // during deactivation assert_eq!( stake_keyed_account.delegate( + &vote_keyed_account_2, + &clock, + &StakeHistory::default(), + &Config::default(), + &signers, + true, + ), + Err(StakeError::TooSoonToRedelegate.into()) + ); + + // verify that delegate succeeds to same vote account + // when stake is deactivating + stake_keyed_account + .delegate( &vote_keyed_account, &clock, &StakeHistory::default(), &Config::default(), - &signers + &signers, + true, + ) + .unwrap(); + + // verify that deactivation 
has been cleared + let stake = StakeState::stake_from(&stake_keyed_account.account.borrow()).unwrap(); + assert_eq!(stake.delegation.deactivation_epoch, std::u64::MAX); + + // verify that delegate to a different vote account fails + // if stake is still active + assert_eq!( + stake_keyed_account.delegate( + &vote_keyed_account_2, + &clock, + &StakeHistory::default(), + &Config::default(), + &signers, + true, ), Err(StakeError::TooSoonToRedelegate.into()) ); @@ -2030,21 +2176,23 @@ mod tests { // without stake history, cool down is instantaneous clock.epoch += 1; - // verify that delegate can be called twice, 2nd is redelegate + // verify that delegate can be called to new vote account, 2nd is redelegate assert!(stake_keyed_account .delegate( - &vote_keyed_account, + &vote_keyed_account_2, &clock, &StakeHistory::default(), &Config::default(), - &signers + &signers, + true, ) .is_ok()); // signed but faked vote account - let faked_vote_account = vote_account.clone(); + let faked_vote_account = vote_account_2.clone(); faked_vote_account.borrow_mut().owner = solana_sdk::pubkey::new_rand(); - let faked_vote_keyed_account = KeyedAccount::new(&vote_pubkey, false, &faked_vote_account); + let faked_vote_keyed_account = + KeyedAccount::new(&vote_pubkey_2, false, &faked_vote_account); assert_eq!( stake_keyed_account.delegate( &faked_vote_keyed_account, @@ -2052,6 +2200,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ), Err(solana_sdk::instruction::InstructionError::IncorrectProgramId) ); @@ -2062,13 +2211,13 @@ mod tests { stake, Stake { delegation: Delegation { - voter_pubkey: vote_pubkey, + voter_pubkey: vote_pubkey_2, stake: stake_lamports, activation_epoch: clock.epoch, deactivation_epoch: std::u64::MAX, ..Delegation::default() }, - credits_observed: vote_state_credits, + credits_observed: vote_state_credits_2, } ); @@ -2082,7 +2231,8 @@ mod tests { &clock, &StakeHistory::default(), &Config::default(), - &signers + &signers, + true, ) 
.is_err()); } @@ -2569,13 +2719,12 @@ mod tests { }, ); + let effective_rate_limited = (effective as f64 * stake.warmup_cooldown_rate) as u64; if epoch < stake.deactivation_epoch { - let increase = (effective as f64 * stake.warmup_cooldown_rate) as u64; - effective += increase.min(activating); + effective += effective_rate_limited.min(activating); other_activations.push(0); } else { - let decrease = (effective as f64 * stake.warmup_cooldown_rate) as u64; - effective -= decrease.min(deactivating); + effective -= effective_rate_limited.min(deactivating); effective += other_activation; other_activations.push(other_activation); } @@ -2714,7 +2863,7 @@ mod tests { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT; let stake_account = - Account::new_ref(stake_lamports, std::mem::size_of::(), &id()); + AccountSharedData::new_ref(stake_lamports, std::mem::size_of::(), &id()); // unsigned keyed account let stake_keyed_account = KeyedAccount::new(&stake_pubkey, false, &stake_account); @@ -2777,8 +2926,11 @@ mod tests { fn test_initialize_incorrect_account_sizes() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = - Account::new_ref(stake_lamports, std::mem::size_of::() + 1, &id()); + let stake_account = AccountSharedData::new_ref( + stake_lamports, + std::mem::size_of::() + 1, + &id(), + ); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, false, &stake_account); assert_eq!( @@ -2793,8 +2945,11 @@ mod tests { Err(InstructionError::InvalidAccountData) ); - let stake_account = - Account::new_ref(stake_lamports, std::mem::size_of::() - 1, &id()); + let stake_account = AccountSharedData::new_ref( + stake_lamports, + std::mem::size_of::() - 1, + &id(), + ); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, false, &stake_account); assert_eq!( @@ -2814,7 +2969,7 @@ mod tests { fn test_deactivate() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports 
= 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta::auto(&stake_pubkey)), std::mem::size_of::(), @@ -2853,7 +3008,8 @@ mod tests { &clock, &StakeHistory::default(), &Config::default(), - &signers + &signers, + true, ), Ok(()) ); @@ -2880,7 +3036,7 @@ mod tests { fn test_set_lockup() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Uninitialized, std::mem::size_of::(), @@ -2891,7 +3047,7 @@ mod tests { // wrong state, should fail let stake_keyed_account = KeyedAccount::new(&stake_pubkey, false, &stake_account); assert_eq!( - stake_keyed_account.set_lockup(&LockupArgs::default(), &HashSet::default()), + stake_keyed_account.set_lockup(&LockupArgs::default(), &HashSet::default(), None), Err(InstructionError::InvalidAccountData) ); @@ -2910,7 +3066,7 @@ mod tests { .unwrap(); assert_eq!( - stake_keyed_account.set_lockup(&LockupArgs::default(), &HashSet::default()), + stake_keyed_account.set_lockup(&LockupArgs::default(), &HashSet::default(), None), Err(InstructionError::MissingRequiredSignature) ); @@ -2921,7 +3077,8 @@ mod tests { epoch: Some(1), custodian: Some(custodian), }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); @@ -2946,6 +3103,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &vec![stake_pubkey].into_iter().collect(), + true, ) .unwrap(); @@ -2957,6 +3115,7 @@ mod tests { custodian: Some(custodian), }, &HashSet::default(), + None ), Err(InstructionError::MissingRequiredSignature) ); @@ -2967,17 +3126,18 @@ mod tests { epoch: Some(1), custodian: Some(custodian), }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); } 
#[test] - fn test_optional_lockup() { + fn test_optional_lockup_for_stake_program_v3_and_earlier() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Uninitialized, std::mem::size_of::(), @@ -3006,7 +3166,8 @@ mod tests { epoch: None, custodian: None, }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); @@ -3018,7 +3179,8 @@ mod tests { epoch: None, custodian: None, }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); @@ -3040,7 +3202,8 @@ mod tests { epoch: Some(3), custodian: None, }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); @@ -3063,7 +3226,8 @@ mod tests { epoch: None, custodian: Some(new_custodian), }, - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Ok(()) ); @@ -3081,17 +3245,109 @@ mod tests { assert_eq!( stake_keyed_account.set_lockup( &LockupArgs::default(), - &vec![custodian].into_iter().collect() + &vec![custodian].into_iter().collect(), + None ), Err(InstructionError::MissingRequiredSignature) ); } #[test] - fn test_withdraw_stake() { + fn test_optional_lockup_for_stake_program_v4() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT + 400; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( + stake_lamports, + &StakeState::Uninitialized, + std::mem::size_of::(), + &id(), + ) + .expect("stake_account"); + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, false, &stake_account); + + let custodian = solana_sdk::pubkey::new_rand(); + stake_keyed_account + .initialize( + &Authorized::auto(&stake_pubkey), + &Lockup { + 
unix_timestamp: 1, + epoch: 1, + custodian, + }, + &Rent::free(), + ) + .unwrap(); + + // Lockup in force: authorized withdrawer cannot change it + assert_eq!( + stake_keyed_account.set_lockup( + &LockupArgs { + unix_timestamp: Some(2), + epoch: None, + custodian: None + }, + &vec![stake_pubkey].into_iter().collect(), + Some(&Clock::default()) + ), + Err(InstructionError::MissingRequiredSignature) + ); + + // Lockup in force: custodian can change it + assert_eq!( + stake_keyed_account.set_lockup( + &LockupArgs { + unix_timestamp: Some(2), + epoch: None, + custodian: None + }, + &vec![custodian].into_iter().collect(), + Some(&Clock::default()) + ), + Ok(()) + ); + + // Lockup expired: custodian cannot change it + assert_eq!( + stake_keyed_account.set_lockup( + &LockupArgs { + unix_timestamp: Some(3), + epoch: None, + custodian: None, + }, + &vec![custodian].into_iter().collect(), + Some(&Clock { + unix_timestamp: UnixTimestamp::MAX, + epoch: Epoch::MAX, + ..Clock::default() + }) + ), + Err(InstructionError::MissingRequiredSignature) + ); + + // Lockup expired: authorized withdrawer can change it + assert_eq!( + stake_keyed_account.set_lockup( + &LockupArgs { + unix_timestamp: Some(3), + epoch: None, + custodian: None, + }, + &vec![stake_pubkey].into_iter().collect(), + Some(&Clock { + unix_timestamp: UnixTimestamp::MAX, + epoch: Epoch::MAX, + ..Clock::default() + }) + ), + Ok(()) + ); + } + + #[test] + fn test_withdraw_stake() { + let stake_pubkey = solana_sdk::pubkey::new_rand(); + let stake_lamports = 42; + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Uninitialized, std::mem::size_of::(), @@ -3102,7 +3358,7 @@ mod tests { let mut clock = Clock::default(); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); // no signers, 
should fail @@ -3115,6 +3371,7 @@ mod tests { &StakeHistory::default(), &to_keyed_account, // unsigned account as withdraw authority None, + true, ), Err(InstructionError::MissingRequiredSignature) ); @@ -3130,6 +3387,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Ok(()) ); @@ -3165,6 +3423,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(InstructionError::InsufficientFunds) ); @@ -3189,6 +3448,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ), Ok(()) ); @@ -3206,6 +3466,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Ok(()) ); @@ -3223,6 +3484,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(InstructionError::InsufficientFunds) ); @@ -3242,6 +3504,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(InstructionError::InsufficientFunds) ); @@ -3256,6 +3519,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Ok(()) ); @@ -3267,7 +3531,7 @@ mod tests { let rent_exempt_reserve = rent.minimum_balance(std::mem::size_of::()); let authority_pubkey = Pubkey::new_unique(); let stake_pubkey = Pubkey::new_unique(); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( 1_000_000_000, &StakeState::Initialized(Meta { rent_exempt_reserve, @@ -3283,7 +3547,7 @@ mod tests { .expect("stake_account"); let stake2_pubkey = Pubkey::new_unique(); - let stake2_account = Account::new_ref_data_with_space( + let stake2_account = AccountSharedData::new_ref_data_with_space( 1_000_000_000, &StakeState::Initialized(Meta { rent_exempt_reserve, @@ -3300,7 +3564,7 @@ mod tests { let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); let stake2_keyed_account = KeyedAccount::new(&stake2_pubkey, false, &stake2_account); - let authority_account = Account::new_ref(42, 0, 
&system_program::id()); + let authority_account = AccountSharedData::new_ref(42, 0, &system_program::id()); let authority_keyed_account = KeyedAccount::new(&authority_pubkey, true, &authority_account); @@ -3312,6 +3576,7 @@ mod tests { &StakeHistory::default(), &authority_keyed_account, None, + true, ), Err(InstructionError::InsufficientFunds), ); @@ -3322,7 +3587,7 @@ mod tests { let stake_pubkey = solana_sdk::pubkey::new_rand(); let total_lamports = 100; let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( total_lamports, &StakeState::Initialized(Meta::auto(&stake_pubkey)), std::mem::size_of::(), @@ -3335,7 +3600,7 @@ mod tests { future.epoch += 16; let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); @@ -3360,6 +3625,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ), Ok(()) ); @@ -3383,6 +3649,7 @@ mod tests { &stake_history, &stake_keyed_account, None, + true, ), Err(InstructionError::InsufficientFunds) ); @@ -3392,7 +3659,7 @@ mod tests { fn test_withdraw_stake_invalid_state() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let total_lamports = 100; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( total_lamports, &StakeState::RewardsPool, std::mem::size_of::(), @@ -3401,7 +3668,7 @@ mod tests { .expect("stake_account"); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let 
stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); assert_eq!( @@ -3412,6 +3679,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(InstructionError::InvalidAccountData) ); @@ -3422,7 +3690,7 @@ mod tests { let stake_pubkey = solana_sdk::pubkey::new_rand(); let custodian = solana_sdk::pubkey::new_rand(); let total_lamports = 100; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( total_lamports, &StakeState::Initialized(Meta { lockup: Lockup { @@ -3438,7 +3706,7 @@ mod tests { .expect("stake_account"); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); @@ -3454,12 +3722,13 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(StakeError::LockupInForce.into()) ); { - let custodian_account = Account::new_ref(1, 0, &system_program::id()); + let custodian_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let custodian_keyed_account = KeyedAccount::new(&custodian, true, &custodian_account); assert_eq!( stake_keyed_account.withdraw( @@ -3469,6 +3738,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, Some(&custodian_keyed_account), + true, ), Ok(()) ); @@ -3487,6 +3757,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Ok(()) ); @@ -3498,7 +3769,7 @@ mod tests { let stake_pubkey = solana_sdk::pubkey::new_rand(); let custodian = stake_pubkey; let total_lamports = 100; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( total_lamports, &StakeState::Initialized(Meta { lockup: Lockup { @@ -3514,7 +3785,7 @@ mod 
tests { .expect("stake_account"); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); @@ -3530,6 +3801,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, None, + true, ), Err(StakeError::LockupInForce.into()) ); @@ -3544,6 +3816,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, Some(&custodian_keyed_account), + true, ), Ok(()) ); @@ -3551,6 +3824,102 @@ mod tests { } } + #[test] + fn test_withdraw_rent_exempt() { + let stake_pubkey = solana_sdk::pubkey::new_rand(); + let clock = Clock::default(); + let rent = Rent::default(); + let rent_exempt_reserve = rent.minimum_balance(std::mem::size_of::()); + let stake = 42; + let stake_account = AccountSharedData::new_ref_data_with_space( + stake + rent_exempt_reserve, + &StakeState::Initialized(Meta { + rent_exempt_reserve, + ..Meta::auto(&stake_pubkey) + }), + std::mem::size_of::(), + &id(), + ) + .expect("stake_account"); + + let to = solana_sdk::pubkey::new_rand(); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); + let to_keyed_account = KeyedAccount::new(&to, false, &to_account); + + // Withdrawing account down to only rent-exempt reserve should succeed before feature, and + // fail after + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); + assert_eq!( + stake_keyed_account.withdraw( + stake, + &to_keyed_account, + &clock, + &StakeHistory::default(), + &stake_keyed_account, + None, + false, + ), + Ok(()) + ); + stake_account.borrow_mut().lamports += stake; // top up account + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); + assert_eq!( + stake_keyed_account.withdraw( + stake, + &to_keyed_account, + &clock, + 
&StakeHistory::default(), + &stake_keyed_account, + None, + true, + ), + Err(InstructionError::InsufficientFunds) + ); + + // Withdrawal that would leave less than rent-exempt reserve should fail + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); + assert_eq!( + stake_keyed_account.withdraw( + stake + 1, + &to_keyed_account, + &clock, + &StakeHistory::default(), + &stake_keyed_account, + None, + false, + ), + Err(InstructionError::InsufficientFunds) + ); + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); + assert_eq!( + stake_keyed_account.withdraw( + stake + 1, + &to_keyed_account, + &clock, + &StakeHistory::default(), + &stake_keyed_account, + None, + true, + ), + Err(InstructionError::InsufficientFunds) + ); + + // Withdrawal of complete account should succeed + let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); + assert_eq!( + stake_keyed_account.withdraw( + stake + rent_exempt_reserve, + &to_keyed_account, + &clock, + &StakeHistory::default(), + &stake_keyed_account, + None, + true, + ), + Ok(()) + ); + } + #[test] fn test_stake_state_redeem_rewards() { let mut vote_state = VoteState::default(); @@ -3851,7 +4220,7 @@ mod tests { fn test_authorize_uninit() { let new_authority = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::default(), std::mem::size_of::(), @@ -3878,7 +4247,7 @@ mod tests { fn test_authorize_lockup() { let stake_authority = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta::auto(&stake_authority)), std::mem::size_of::(), @@ -3887,7 +4256,7 @@ mod tests { .expect("stake_account"); let to = solana_sdk::pubkey::new_rand(); 
- let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let clock = Clock::default(); @@ -3987,6 +4356,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account, // old signer None, + true, ), Err(InstructionError::MissingRequiredSignature) ); @@ -4003,6 +4373,7 @@ mod tests { &StakeHistory::default(), &stake_keyed_account2, None, + true, ), Ok(()) ); @@ -4015,7 +4386,7 @@ mod tests { let seed = "42"; let withdrawer_pubkey = Pubkey::create_with_seed(&base_pubkey, &seed, &id()).unwrap(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta::auto(&withdrawer_pubkey)), std::mem::size_of::(), @@ -4023,7 +4394,7 @@ mod tests { ) .expect("stake_account"); - let base_account = Account::new_ref(1, 0, &id()); + let base_account = AccountSharedData::new_ref(1, 0, &id()); let base_keyed_account = KeyedAccount::new(&base_pubkey, true, &base_account); let stake_keyed_account = KeyedAccount::new(&withdrawer_pubkey, true, &stake_account); @@ -4110,7 +4481,7 @@ mod tests { fn test_authorize_override() { let withdrawer_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta::auto(&withdrawer_pubkey)), std::mem::size_of::(), @@ -4197,7 +4568,7 @@ mod tests { fn test_split_source_uninitialized() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT * 2; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4206,7 +4577,7 @@ mod tests 
{ .expect("stake_account"); let split_stake_pubkey = solana_sdk::pubkey::new_rand(); - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4244,7 +4615,7 @@ mod tests { fn test_split_split_not_uninitialized() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = MIN_DELEGATE_STAKE_AMOUNT * 2; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake(Meta::auto(&stake_pubkey), Stake::just_stake(stake_lamports)), std::mem::size_of::(), @@ -4253,7 +4624,7 @@ mod tests { .expect("stake_account"); let split_stake_pubkey = solana_sdk::pubkey::new_rand(); - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Initialized(Meta::auto(&stake_pubkey)), std::mem::size_of::(), @@ -4286,7 +4657,7 @@ mod tests { fn test_split_more_than_staked() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = 42 + MIN_DELEGATE_STAKE_AMOUNT * 2; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake( Meta::auto(&stake_pubkey), @@ -4298,7 +4669,7 @@ mod tests { .expect("stake_account"); let split_stake_pubkey = solana_sdk::pubkey::new_rand(); - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4344,7 +4715,7 @@ mod tests { Stake::just_stake(stake_lamports - rent_exempt_reserve), ), ] { - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -4354,7 +4725,7 @@ mod tests { let 
stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4439,7 +4810,7 @@ mod tests { StakeState::Initialized(Meta::auto(&stake_pubkey)), StakeState::Stake(Meta::auto(&stake_pubkey), Stake::just_stake(stake_lamports)), ] { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4450,7 +4821,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -4526,7 +4897,7 @@ mod tests { let split_stake_pubkey = solana_sdk::pubkey::new_rand(); let signers = vec![stake_pubkey].into_iter().collect(); - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4537,7 +4908,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake(Meta::auto(&stake_pubkey), Stake::just_stake(stake_lamports)), std::mem::size_of::(), @@ -4577,7 +4948,7 @@ mod tests { // test_split, since that test uses a Meta with rent_exempt_reserve = 0 let split_lamport_balances = vec![0, 1, rent_exempt_reserve, rent_exempt_reserve + 1]; for initial_balance in split_lamport_balances { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = 
AccountSharedData::new_ref_data_with_space( initial_balance, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4588,7 +4959,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, std::mem::size_of::(), @@ -4692,7 +5063,7 @@ mod tests { expected_rent_exempt_reserve + 1, ]; for initial_balance in split_lamport_balances { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( initial_balance, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4703,7 +5074,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, std::mem::size_of::() + 100, @@ -4811,7 +5182,7 @@ mod tests { MIN_DELEGATE_STAKE_AMOUNT + expected_rent_exempt_reserve + 1, ]; for initial_balance in split_lamport_balances { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( initial_balance, &StakeState::Uninitialized, std::mem::size_of::() + 100, @@ -4822,7 +5193,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, std::mem::size_of::(), @@ -4840,6 +5211,11 @@ mod tests { } else { assert_eq!(split_result, Ok(())); } + + // Splitting 100% of source should not make a difference + let split_result = + stake_keyed_account.split(stake_lamports, &split_stake_keyed_account, &signers); + assert_eq!(split_result, 
Err(InstructionError::InvalidAccountData)); } } @@ -4867,7 +5243,7 @@ mod tests { Stake::just_stake(stake_lamports - rent_exempt_reserve), ), ] { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4878,7 +5254,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -4954,7 +5330,7 @@ mod tests { // covered in test_split_100_percent_of_source, but included here as well for readability let split_lamport_balances = vec![0, 1, rent_exempt_reserve, rent_exempt_reserve + 1]; for initial_balance in split_lamport_balances { - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( initial_balance, &StakeState::Uninitialized, std::mem::size_of::(), @@ -4965,7 +5341,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, std::mem::size_of::(), @@ -5030,7 +5406,7 @@ mod tests { ), ] { // Test that splitting to a larger account fails, because of rent exempt - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::() + 10000, @@ -5040,7 +5416,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, 
std::mem::size_of::(), @@ -5051,12 +5427,12 @@ mod tests { assert_eq!( stake_keyed_account.split(stake_lamports, &split_stake_keyed_account, &signers), - Err(InstructionError::InsufficientFunds) + Err(InstructionError::InvalidAccountData) ); // Test that splitting from a larger account to a smaller one works. // Split amount should not matter, assuming other fund criteria are met - let split_stake_account = Account::new_ref_data_with_space( + let split_stake_account = AccountSharedData::new_ref_data_with_space( 0, &StakeState::Uninitialized, std::mem::size_of::(), @@ -5066,7 +5442,7 @@ mod tests { let split_stake_keyed_account = KeyedAccount::new(&split_stake_pubkey, true, &split_stake_account); - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &state, std::mem::size_of::() + 100, @@ -5159,7 +5535,7 @@ mod tests { Stake::just_stake(stake_lamports), ), ] { - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -5168,7 +5544,7 @@ mod tests { .expect("stake_account"); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); - let source_stake_account = Account::new_ref_data_with_space( + let source_stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, source_state, std::mem::size_of::(), @@ -5185,7 +5561,8 @@ mod tests { &source_stake_keyed_account, &Clock::default(), &StakeHistory::default(), - &HashSet::new() + &HashSet::new(), + false, ), Err(InstructionError::MissingRequiredSignature) ); @@ -5196,7 +5573,8 @@ mod tests { &source_stake_keyed_account, &Clock::default(), &StakeHistory::default(), - &signers + &signers, + false, ), Ok(()) ); @@ -5272,7 +5650,7 @@ mod tests { }, ..Stake::default() }; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( 
stake_lamports, &StakeState::Stake(meta, stake), std::mem::size_of::(), @@ -5288,6 +5666,7 @@ mod tests { &Clock::default(), &StakeHistory::default(), &signers, + false, ), Err(InstructionError::InvalidArgument), ); @@ -5319,7 +5698,7 @@ mod tests { Stake::just_stake(stake_lamports), ), ] { - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -5328,7 +5707,7 @@ mod tests { .expect("stake_account"); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); - let source_stake_account = Account::new_ref_data_with_space( + let source_stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, source_state, std::mem::size_of::(), @@ -5345,6 +5724,7 @@ mod tests { &Clock::default(), &StakeHistory::default(), &wrong_signers, + false, ), Err(InstructionError::MissingRequiredSignature) ); @@ -5356,6 +5736,7 @@ mod tests { &Clock::default(), &StakeHistory::default(), &signers, + false, ), Err(StakeError::MergeMismatch.into()) ); @@ -5382,7 +5763,7 @@ mod tests { ), ] { for source_state in &[StakeState::Uninitialized, StakeState::RewardsPool] { - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, state, std::mem::size_of::(), @@ -5391,7 +5772,7 @@ mod tests { .expect("stake_account"); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); - let source_stake_account = Account::new_ref_data_with_space( + let source_stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, source_state, std::mem::size_of::(), @@ -5408,6 +5789,7 @@ mod tests { &Clock::default(), &StakeHistory::default(), &signers, + false, ), Err(InstructionError::InvalidAccountData) ); @@ -5425,7 +5807,7 @@ mod tests { let signers = vec![authorized_pubkey].into_iter().collect(); - let stake_account = 
Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake( Meta::auto(&authorized_pubkey), @@ -5437,7 +5819,7 @@ mod tests { .expect("stake_account"); let stake_keyed_account = KeyedAccount::new(&stake_pubkey, true, &stake_account); - let source_stake_account = Account::new_ref_data_with_space( + let source_stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake( Meta::auto(&authorized_pubkey), @@ -5456,7 +5838,8 @@ mod tests { &source_stake_keyed_account, &Clock::default(), &StakeHistory::default(), - &signers + &signers, + false, ), Err(InstructionError::IncorrectProgramId) ); @@ -5489,7 +5872,7 @@ mod tests { }, ..Stake::default() }; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Stake(meta, stake), std::mem::size_of::(), @@ -5507,7 +5890,7 @@ mod tests { }, ..stake }; - let source_account = Account::new_ref_data_with_space( + let source_account = AccountSharedData::new_ref_data_with_space( source_lamports, &StakeState::Stake(meta, source_stake), std::mem::size_of::(), @@ -5553,6 +5936,7 @@ mod tests { clock, stake_history, signers, + false, ); if result.is_ok() { assert_eq!(test_source_keyed.state(), Ok(StakeState::Uninitialized),); @@ -5813,7 +6197,7 @@ mod tests { fn test_authorize_delegated_stake() { let stake_pubkey = solana_sdk::pubkey::new_rand(); let stake_lamports = 42; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(Meta::auto(&stake_pubkey)), std::mem::size_of::(), @@ -5841,6 +6225,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &signers, + true, ) .unwrap(); @@ -5888,6 +6273,7 @@ mod tests { &StakeHistory::default(), &Config::default(), &other_signers, + true, ), 
Err(InstructionError::MissingRequiredSignature) ); @@ -5900,7 +6286,8 @@ mod tests { &clock, &StakeHistory::default(), &Config::default(), - &new_signers + &new_signers, + true, ), Ok(()) ); @@ -5924,7 +6311,7 @@ mod tests { rent_exempt_reserve, ..Meta::auto(&withdrawer_pubkey) }; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Initialized(meta), std::mem::size_of::(), @@ -5953,6 +6340,7 @@ mod tests { &stake_history, &config, &signers, + true, ) .unwrap(); @@ -5960,8 +6348,9 @@ mod tests { stake_keyed_account.deactivate(&clock, &signers).unwrap(); clock.epoch += 1; + // Once deactivated, we withdraw stake to new keyed account let to = Pubkey::new_unique(); - let to_account = Account::new_ref(1, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(1, 0, &system_program::id()); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); let withdraw_lamports = initial_lamports / 2; stake_keyed_account @@ -5972,6 +6361,7 @@ mod tests { &stake_history, &stake_keyed_account, None, + true, ) .unwrap(); let expected_balance = rent_exempt_reserve + initial_lamports - withdraw_lamports; @@ -5985,6 +6375,7 @@ mod tests { &stake_history, &config, &signers, + true, ) .unwrap(); let stake = StakeState::stake_from(&stake_account.borrow()).unwrap(); @@ -6007,6 +6398,7 @@ mod tests { &stake_history, &config, &signers, + true, ) .unwrap(); let stake = StakeState::stake_from(&stake_account.borrow()).unwrap(); @@ -6097,7 +6489,7 @@ mod tests { rent_exempt_reserve: rent_exempt_reserve + offset, ..Meta::default() }; - let mut account = Account::new(account_balance, right_data_len, &id()); + let mut account = AccountSharedData::new(account_balance, right_data_len, &id()); account.set_state(&StakeState::Initialized(meta)).unwrap(); let result = rewrite_stakes(&mut account, &rent); match expected_rewrite { @@ -6133,7 +6525,7 @@ mod tests { }), 
..Stake::default() }; - let mut account = Account::new(account_balance, right_data_len, &id()); + let mut account = AccountSharedData::new(account_balance, right_data_len, &id()); account.set_state(&StakeState::Stake(meta, stake)).unwrap(); let result = rewrite_stakes(&mut account, &rent); match expected_rewrite { @@ -6276,11 +6668,19 @@ mod tests { &good_delegation ) .is_err()); + } + #[test] + fn test_metas_can_merge_pre_v4() { + let invoke_context = MockInvokeContext::default(); // Identical Metas can merge - assert!( - MergeKind::metas_can_merge(&invoke_context, &Meta::default(), &Meta::default()).is_ok() - ); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &Meta::default(), + &Meta::default(), + None, + ) + .is_ok()); let mismatched_rent_exempt_reserve_ok = Meta { rent_exempt_reserve: 42, @@ -6293,13 +6693,15 @@ mod tests { assert!(MergeKind::metas_can_merge( &invoke_context, &Meta::default(), - &mismatched_rent_exempt_reserve_ok + &mismatched_rent_exempt_reserve_ok, + None, ) .is_ok()); assert!(MergeKind::metas_can_merge( &invoke_context, &mismatched_rent_exempt_reserve_ok, - &Meta::default() + &Meta::default(), + None, ) .is_ok()); @@ -6317,13 +6719,15 @@ mod tests { assert!(MergeKind::metas_can_merge( &invoke_context, &Meta::default(), - &mismatched_authorized_fails + &mismatched_authorized_fails, + None, ) .is_err()); assert!(MergeKind::metas_can_merge( &invoke_context, &mismatched_authorized_fails, - &Meta::default() + &Meta::default(), + None, ) .is_err()); @@ -6339,15 +6743,163 @@ mod tests { assert!(MergeKind::metas_can_merge( &invoke_context, &Meta::default(), - &mismatched_lockup_fails + &mismatched_lockup_fails, + None, ) .is_err()); assert!(MergeKind::metas_can_merge( &invoke_context, &mismatched_lockup_fails, - &Meta::default() + &Meta::default(), + None, + ) + .is_err()); + } + + #[test] + fn test_metas_can_merge_v4() { + let invoke_context = MockInvokeContext::default(); + // Identical Metas can merge + 
assert!(MergeKind::metas_can_merge( + &invoke_context, + &Meta::default(), + &Meta::default(), + Some(&Clock::default()) + ) + .is_ok()); + + let mismatched_rent_exempt_reserve_ok = Meta { + rent_exempt_reserve: 42, + ..Meta::default() + }; + assert_ne!( + mismatched_rent_exempt_reserve_ok.rent_exempt_reserve, + Meta::default().rent_exempt_reserve, + ); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &Meta::default(), + &mismatched_rent_exempt_reserve_ok, + Some(&Clock::default()) + ) + .is_ok()); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &mismatched_rent_exempt_reserve_ok, + &Meta::default(), + Some(&Clock::default()) + ) + .is_ok()); + + let mismatched_authorized_fails = Meta { + authorized: Authorized { + staker: Pubkey::new_unique(), + withdrawer: Pubkey::new_unique(), + }, + ..Meta::default() + }; + assert_ne!( + mismatched_authorized_fails.authorized, + Meta::default().authorized, + ); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &Meta::default(), + &mismatched_authorized_fails, + Some(&Clock::default()) ) .is_err()); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &mismatched_authorized_fails, + &Meta::default(), + Some(&Clock::default()) + ) + .is_err()); + + let lockup1_timestamp = 42; + let lockup2_timestamp = 4242; + let lockup1_epoch = 4; + let lockup2_epoch = 42; + let metas_with_lockup1 = Meta { + lockup: Lockup { + unix_timestamp: lockup1_timestamp, + epoch: lockup1_epoch, + custodian: Pubkey::new_unique(), + }, + ..Meta::default() + }; + let metas_with_lockup2 = Meta { + lockup: Lockup { + unix_timestamp: lockup2_timestamp, + epoch: lockup2_epoch, + custodian: Pubkey::new_unique(), + }, + ..Meta::default() + }; + + // Mismatched lockups fail when both in force + assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup1, + &metas_with_lockup2, + Some(&Clock::default()) + ) + .is_err()); + 
assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup2, + &metas_with_lockup1, + Some(&Clock::default()) + ) + .is_err()); + + let clock = Clock { + epoch: lockup1_epoch + 1, + unix_timestamp: lockup1_timestamp + 1, + ..Clock::default() + }; + + // Mismatched lockups fail when either in force + assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup1, + &metas_with_lockup2, + Some(&clock) + ) + .is_err()); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup2, + &metas_with_lockup1, + Some(&clock) + ) + .is_err()); + + let clock = Clock { + epoch: lockup2_epoch + 1, + unix_timestamp: lockup2_timestamp + 1, + ..Clock::default() + }; + + // Mismatched lockups succeed when both expired + assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup1, + &metas_with_lockup2, + Some(&clock) + ) + .is_ok()); + assert!(MergeKind::metas_can_merge( + &invoke_context, + &metas_with_lockup2, + &metas_with_lockup1, + Some(&clock) + ) + .is_ok()); } #[test] @@ -6363,7 +6915,7 @@ mod tests { rent_exempt_reserve, ..Meta::auto(&authority_pubkey) }; - let stake_account = Account::new_ref_data_with_space( + let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, &StakeState::Uninitialized, std::mem::size_of::(), @@ -6605,37 +7157,37 @@ mod tests { assert_eq!( inactive .clone() - .merge(&invoke_context, inactive.clone()) + .merge(&invoke_context, inactive.clone(), None) .unwrap(), None ); assert_eq!( inactive .clone() - .merge(&invoke_context, activation_epoch.clone()) + .merge(&invoke_context, activation_epoch.clone(), None) .unwrap(), None ); assert!(inactive .clone() - .merge(&invoke_context, fully_active.clone()) + .merge(&invoke_context, fully_active.clone(), None) .is_err()); assert!(activation_epoch .clone() - .merge(&invoke_context, 
fully_active.clone()) + .merge(&invoke_context, fully_active.clone(), None) .is_err()); assert!(fully_active .clone() - .merge(&invoke_context, inactive.clone()) + .merge(&invoke_context, inactive.clone(), None) .is_err()); assert!(fully_active .clone() - .merge(&invoke_context, activation_epoch.clone()) + .merge(&invoke_context, activation_epoch.clone(), None) .is_err()); let new_state = activation_epoch .clone() - .merge(&invoke_context, inactive) + .merge(&invoke_context, inactive, None) .unwrap() .unwrap(); let delegation = new_state.delegation().unwrap(); @@ -6643,7 +7195,7 @@ mod tests { let new_state = activation_epoch .clone() - .merge(&invoke_context, activation_epoch) + .merge(&invoke_context, activation_epoch, None) .unwrap() .unwrap(); let delegation = new_state.delegation().unwrap(); @@ -6654,7 +7206,7 @@ mod tests { let new_state = fully_active .clone() - .merge(&invoke_context, fully_active) + .merge(&invoke_context, fully_active, None) .unwrap() .unwrap(); let delegation = new_state.delegation().unwrap(); diff --git a/programs/vest/Cargo.toml b/programs/vest/Cargo.toml index d774411e8b..395cc0332f 100644 --- a/programs/vest/Cargo.toml +++ b/programs/vest/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-vest-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Vest program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,14 +14,14 @@ bincode = "1.3.1" chrono = { version = "0.4.11", features = ["serde"] } num-derive = "0.3" num-traits = "0.2" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-sdk = { path = "../../sdk", version = "=1.5.19" } -solana-config-program = { path = "../config", version = "=1.5.19" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } +solana-config-program = { path = "../config", version = "=1.6.14" } thiserror = "1.0" [dev-dependencies] -solana-runtime = { path = "../../runtime", version = "=1.5.19" } +solana-runtime = { path = 
"../../runtime", version = "=1.6.14" } [lib] crate-type = ["lib"] diff --git a/programs/vest/src/vest_instruction.rs b/programs/vest/src/vest_instruction.rs index ca944750aa..c72af0ea61 100644 --- a/programs/vest/src/vest_instruction.rs +++ b/programs/vest/src/vest_instruction.rs @@ -75,7 +75,7 @@ fn initialize_account( total_lamports: u64, ) -> Instruction { let keys = vec![AccountMeta::new(*contract_pubkey, false)]; - Instruction::new( + Instruction::new_with_bincode( id(), &VestInstruction::InitializeAccount { terminator_pubkey: *terminator_pubkey, @@ -116,7 +116,7 @@ pub fn set_terminator(contract: &Pubkey, old_pubkey: &Pubkey, new_pubkey: &Pubke AccountMeta::new(*contract, false), AccountMeta::new(*old_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VestInstruction::SetTerminator(*new_pubkey), account_metas, @@ -128,7 +128,7 @@ pub fn set_payee(contract: &Pubkey, old_pubkey: &Pubkey, new_pubkey: &Pubkey) -> AccountMeta::new(*contract, false), AccountMeta::new(*old_pubkey, true), ]; - Instruction::new(id(), &VestInstruction::SetPayee(*new_pubkey), account_metas) + Instruction::new_with_bincode(id(), &VestInstruction::SetPayee(*new_pubkey), account_metas) } pub fn redeem_tokens(contract: &Pubkey, date_pubkey: &Pubkey, to: &Pubkey) -> Instruction { @@ -137,7 +137,7 @@ pub fn redeem_tokens(contract: &Pubkey, date_pubkey: &Pubkey, to: &Pubkey) -> In AccountMeta::new_readonly(*date_pubkey, false), AccountMeta::new(*to, false), ]; - Instruction::new(id(), &VestInstruction::RedeemTokens, account_metas) + Instruction::new_with_bincode(id(), &VestInstruction::RedeemTokens, account_metas) } pub fn terminate(contract: &Pubkey, from: &Pubkey, to: &Pubkey) -> Instruction { @@ -148,7 +148,7 @@ pub fn terminate(contract: &Pubkey, from: &Pubkey, to: &Pubkey) -> Instruction { if from != to { account_metas.push(AccountMeta::new(*to, false)); } - Instruction::new(id(), &VestInstruction::Terminate, account_metas) + Instruction::new_with_bincode(id(), 
&VestInstruction::Terminate, account_metas) } pub fn renege(contract: &Pubkey, from: &Pubkey, to: &Pubkey, lamports: u64) -> Instruction { @@ -159,7 +159,7 @@ pub fn renege(contract: &Pubkey, from: &Pubkey, to: &Pubkey, lamports: u64) -> I if from != to { account_metas.push(AccountMeta::new(*to, false)); } - Instruction::new(id(), &VestInstruction::Renege(lamports), account_metas) + Instruction::new_with_bincode(id(), &VestInstruction::Renege(lamports), account_metas) } pub fn vest_all(contract: &Pubkey, from: &Pubkey) -> Instruction { @@ -167,5 +167,5 @@ pub fn vest_all(contract: &Pubkey, from: &Pubkey) -> Instruction { AccountMeta::new(*contract, false), AccountMeta::new(*from, true), ]; - Instruction::new(id(), &VestInstruction::VestAll, account_metas) + Instruction::new_with_bincode(id(), &VestInstruction::VestAll, account_metas) } diff --git a/programs/vest/src/vest_processor.rs b/programs/vest/src/vest_processor.rs index 5e81346097..3dd5d82cba 100644 --- a/programs/vest/src/vest_processor.rs +++ b/programs/vest/src/vest_processor.rs @@ -7,7 +7,7 @@ use chrono::prelude::*; use solana_config_program::date_instruction::DateConfig; use solana_config_program::get_config_data; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount, WritableAccount}, feature_set, instruction::InstructionError, keyed_account::{next_keyed_account, KeyedAccount}, @@ -28,7 +28,7 @@ fn verify_date_account( let account = verify_account(keyed_account, expected_pubkey)?; let config_data = - get_config_data(&account.data).map_err(|_| InstructionError::InvalidAccountData)?; + get_config_data(&account.data()).map_err(|_| InstructionError::InvalidAccountData)?; let date_config = DateConfig::deserialize(config_data).ok_or(InstructionError::InvalidAccountData)?; @@ -38,7 +38,7 @@ fn verify_date_account( fn verify_account<'a>( keyed_account: &'a KeyedAccount, expected_pubkey: &Pubkey, -) -> Result, InstructionError> { +) -> Result, InstructionError> { if 
keyed_account.unsigned_key() != expected_pubkey { return Err(VestError::Unauthorized.into()); } @@ -49,7 +49,7 @@ fn verify_account<'a>( fn verify_signed_account<'a>( keyed_account: &'a KeyedAccount, expected_pubkey: &Pubkey, -) -> Result, InstructionError> { +) -> Result, InstructionError> { if keyed_account.signer_key().is_none() { return Err(InstructionError::MissingRequiredSignature); } @@ -90,7 +90,7 @@ pub fn process_instruction( ..VestState::default() } } else { - VestState::deserialize(&contract_account.data)? + VestState::deserialize(&contract_account.data())? }; match instruction { @@ -147,7 +147,7 @@ pub fn process_instruction( } } - vest_state.serialize(&mut contract_account.data) + vest_state.serialize(contract_account.data_as_mut_slice()) } #[cfg(test)] @@ -270,7 +270,7 @@ mod tests { fn test_verify_account_unauthorized() { // Ensure client can't sneak in with an untrusted date account. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &solana_config_program::id()); + let account = AccountSharedData::new_ref(1, 0, &solana_config_program::id()); let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); let mallory_pubkey = solana_sdk::pubkey::new_rand(); // <-- Attack! Not the expected account. @@ -284,7 +284,7 @@ mod tests { fn test_verify_signed_account_missing_signature() { // Ensure client can't sneak in with an unsigned account. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &solana_config_program::id()); + let account = AccountSharedData::new_ref(1, 0, &solana_config_program::id()); let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); // <-- Attack! Unsigned transaction. assert_eq!( @@ -297,7 +297,7 @@ mod tests { fn test_verify_date_account_incorrect_program_id() { // Ensure client can't sneak in with a non-Config account. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &id()); // <-- Attack! 
Pass Vest account where Config account is expected. + let account = AccountSharedData::new_ref(1, 0, &id()); // <-- Attack! Pass Vest account where Config account is expected. let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); assert_eq!( verify_date_account(&keyed_account, &date_pubkey).unwrap_err(), @@ -309,7 +309,7 @@ mod tests { fn test_verify_date_account_uninitialized_config() { // Ensure no panic when `get_config_data()` returns an error. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 0, &solana_config_program::id()); // <-- Attack! Zero space. + let account = AccountSharedData::new_ref(1, 0, &solana_config_program::id()); // <-- Attack! Zero space. let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); assert_eq!( verify_date_account(&keyed_account, &date_pubkey).unwrap_err(), @@ -321,7 +321,7 @@ mod tests { fn test_verify_date_account_invalid_date_config() { // Ensure no panic when `deserialize::()` returns an error. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 1, &solana_config_program::id()); // Attack! 1 byte, enough to sneak by `get_config_data()`, but not DateConfig deserialize. + let account = AccountSharedData::new_ref(1, 1, &solana_config_program::id()); // Attack! 1 byte, enough to sneak by `get_config_data()`, but not DateConfig deserialize. let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); assert_eq!( verify_date_account(&keyed_account, &date_pubkey).unwrap_err(), @@ -333,7 +333,7 @@ mod tests { fn test_verify_date_account_deserialize() { // Ensure no panic when `deserialize::()` returns an error. let date_pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new_ref(1, 1, &solana_config_program::id()); // Attack! 1 byte, enough to sneak by `get_config_data()`, but not DateConfig deserialize. + let account = AccountSharedData::new_ref(1, 1, &solana_config_program::id()); // Attack! 
1 byte, enough to sneak by `get_config_data()`, but not DateConfig deserialize. let keyed_account = KeyedAccount::new(&date_pubkey, false, &account); assert_eq!( verify_date_account(&keyed_account, &date_pubkey).unwrap_err(), diff --git a/programs/vest/src/vest_state.rs b/programs/vest/src/vest_state.rs index b835e63a4b..f177e85f4c 100644 --- a/programs/vest/src/vest_state.rs +++ b/programs/vest/src/vest_state.rs @@ -7,7 +7,7 @@ use chrono::{ serde::ts_seconds, }; use serde_derive::{Deserialize, Serialize}; -use solana_sdk::{account::Account, instruction::InstructionError, pubkey::Pubkey}; +use solana_sdk::{account::AccountSharedData, instruction::InstructionError, pubkey::Pubkey}; use std::cmp::min; #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] @@ -82,9 +82,9 @@ impl VestState { /// Redeem vested tokens. pub fn redeem_tokens( &mut self, - contract_account: &mut Account, + contract_account: &mut AccountSharedData, current_date: Date, - payee_account: &mut Account, + payee_account: &mut AccountSharedData, ) { let vested_lamports = self.calc_vested_lamports(current_date); let redeemable_lamports = vested_lamports.saturating_sub(self.redeemed_lamports); @@ -98,8 +98,8 @@ impl VestState { /// Renege on the given number of tokens and send them to the given payee. 
pub fn renege( &mut self, - contract_account: &mut Account, - payee_account: &mut Account, + contract_account: &mut AccountSharedData, + payee_account: &mut AccountSharedData, lamports: u64, ) { let reneged_lamports = min(contract_account.lamports, lamports); @@ -119,24 +119,24 @@ impl VestState { mod test { use super::*; use crate::id; - use solana_sdk::account::Account; + use solana_sdk::account::{AccountSharedData, ReadableAccount, WritableAccount}; use solana_sdk::system_program; #[test] fn test_serializer() { - let mut a = Account::new(0, 512, &id()); + let mut a = AccountSharedData::new(0, 512, &id()); let b = VestState::default(); - b.serialize(&mut a.data).unwrap(); - let c = VestState::deserialize(&a.data).unwrap(); + b.serialize(a.data_as_mut_slice()).unwrap(); + let c = VestState::deserialize(&a.data()).unwrap(); assert_eq!(b, c); } #[test] fn test_serializer_data_too_small() { - let mut a = Account::new(0, 1, &id()); + let mut a = AccountSharedData::new(0, 1, &id()); let b = VestState::default(); assert_eq!( - b.serialize(&mut a.data), + b.serialize(a.data_as_mut_slice()), Err(InstructionError::AccountDataTooSmall) ); } @@ -144,14 +144,16 @@ mod test { #[test] fn test_schedule_after_renege() { let total_lamports = 3; - let mut contract_account = Account::new(total_lamports, 512, &id()); - let mut payee_account = Account::new(0, 0, &system_program::id()); + let mut contract_account = AccountSharedData::new(total_lamports, 512, &id()); + let mut payee_account = AccountSharedData::new(0, 0, &system_program::id()); let mut vest_state = VestState { total_lamports, start_date_time: Utc.ymd(2019, 1, 1).and_hms(0, 0, 0), ..VestState::default() }; - vest_state.serialize(&mut contract_account.data).unwrap(); + vest_state + .serialize(contract_account.data_as_mut_slice()) + .unwrap(); let current_date = Utc.ymd(2020, 1, 1); assert_eq!(vest_state.calc_vested_lamports(current_date), 1); @@ -171,13 +173,15 @@ mod test { #[test] fn test_vest_all() { let total_lamports 
= 3; - let mut contract_account = Account::new(total_lamports, 512, &id()); + let mut contract_account = AccountSharedData::new(total_lamports, 512, &id()); let mut vest_state = VestState { total_lamports, start_date_time: Utc.ymd(2019, 1, 1).and_hms(0, 0, 0), ..VestState::default() }; - vest_state.serialize(&mut contract_account.data).unwrap(); + vest_state + .serialize(contract_account.data_as_mut_slice()) + .unwrap(); let current_date = Utc.ymd(2020, 1, 1); assert_eq!(vest_state.calc_vested_lamports(current_date), 1); diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index 058d95d826..e1a0d8e701 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-vote-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Vote program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,13 +14,13 @@ bincode = "1.3.1" log = "0.4.11" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.5.19" } -solana-logger = { path = "../../logger", version = "=1.5.19" } -solana-metrics = { path = "../../metrics", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.6.14" } +solana-logger = { path = "../../logger", version = "=1.6.14" } +solana-metrics = { path = "../../metrics", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } thiserror = "1.0" [build-dependencies] diff --git a/programs/vote/src/lib.rs b/programs/vote/src/lib.rs index 57e55b52dd..0af6995358 100644 --- a/programs/vote/src/lib.rs +++ b/programs/vote/src/lib.rs @@ -12,4 +12,4 @@ extern crate solana_metrics; 
#[macro_use] extern crate solana_frozen_abi_macro; -solana_sdk::declare_id!("Vote111111111111111111111111111111111111111"); +pub use solana_sdk::vote::program::{check_id, id}; diff --git a/programs/vote/src/vote_instruction.rs b/programs/vote/src/vote_instruction.rs index eb869625e4..402a19c067 100644 --- a/programs/vote/src/vote_instruction.rs +++ b/programs/vote/src/vote_instruction.rs @@ -121,7 +121,7 @@ fn initialize_account(vote_pubkey: &Pubkey, vote_init: &VoteInit) -> Instruction AccountMeta::new_readonly(vote_init.node_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VoteInstruction::InitializeAccount(*vote_init), account_metas, @@ -175,7 +175,7 @@ pub fn authorize( AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VoteInstruction::Authorize(*new_authorized_pubkey, vote_authorize), account_metas, @@ -193,7 +193,7 @@ pub fn update_validator_identity( AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VoteInstruction::UpdateValidatorIdentity, account_metas, @@ -210,7 +210,7 @@ pub fn update_commission( AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VoteInstruction::UpdateCommission(commission), account_metas, @@ -225,7 +225,7 @@ pub fn vote(vote_pubkey: &Pubkey, authorized_voter_pubkey: &Pubkey, vote: Vote) AccountMeta::new_readonly(*authorized_voter_pubkey, true), ]; - Instruction::new(id(), &VoteInstruction::Vote(vote), account_metas) + Instruction::new_with_bincode(id(), &VoteInstruction::Vote(vote), account_metas) } pub fn vote_switch( @@ -241,7 +241,7 @@ pub fn vote_switch( AccountMeta::new_readonly(*authorized_voter_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( id(), &VoteInstruction::VoteSwitch(vote, proof_hash), account_metas, @@ -260,7 +260,7 @@ pub fn withdraw( 
AccountMeta::new_readonly(*authorized_withdrawer_pubkey, true), ]; - Instruction::new(id(), &VoteInstruction::Withdraw(lamports), account_metas) + Instruction::new_with_bincode(id(), &VoteInstruction::Withdraw(lamports), account_metas) } fn verify_rent_exemption( @@ -342,7 +342,7 @@ pub fn process_instruction( mod tests { use super::*; use solana_sdk::{ - account::{self, Account}, + account::{self, Account, AccountSharedData}, process_instruction::MockInvokeContext, rent::Rent, }; @@ -370,27 +370,27 @@ mod tests { .iter() .map(|meta| { RefCell::new(if sysvar::clock::check_id(&meta.pubkey) { - account::create_account_for_test(&Clock::default()) + account::create_account_shared_data_for_test(&Clock::default()) } else if sysvar::slot_hashes::check_id(&meta.pubkey) { - account::create_account_for_test(&SlotHashes::default()) + account::create_account_shared_data_for_test(&SlotHashes::default()) } else if sysvar::rent::check_id(&meta.pubkey) { - account::create_account_for_test(&Rent::free()) + account::create_account_shared_data_for_test(&Rent::free()) } else if meta.pubkey == invalid_vote_state_pubkey() { - Account { + AccountSharedData::from(Account { owner: invalid_vote_state_pubkey(), ..Account::default() - } + }) } else { - Account { + AccountSharedData::from(Account { owner: id(), ..Account::default() - } + }) }) }) .collect(); for _ in 0..instruction.accounts.len() { - accounts.push(RefCell::new(Account::default())); + accounts.push(RefCell::new(AccountSharedData::default())); } { let keyed_accounts: Vec<_> = instruction diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index e631a36103..ae64453028 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -6,7 +6,7 @@ use bincode::{deserialize, serialize_into, serialized_size, ErrorKind}; use log::*; use serde_derive::{Deserialize, Serialize}; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount, 
WritableAccount}, account_utils::State, clock::{Epoch, Slot, UnixTimestamp}, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, @@ -31,7 +31,7 @@ pub const MAX_LOCKOUT_HISTORY: usize = 31; pub const INITIAL_LOCKOUT: usize = 2; // Maximum number of credits history to keep around -const MAX_EPOCH_CREDITS_HISTORY: usize = 64; +pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64; // Offset of VoteState::prior_voters, for determining initialization status without deserialization const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82; @@ -59,6 +59,10 @@ impl Vote { pub fn last_voted_slot(&self) -> Option { self.slots.last().copied() } + + pub fn last_voted_slot_hash(&self) -> Option<(Slot, Hash)> { + self.slots.last().copied().map(|slot| (slot, self.hash)) + } } #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] @@ -223,13 +227,13 @@ impl VoteState { } // utility function, used by Stakes, tests - pub fn from(account: &Account) -> Option { - Self::deserialize(&account.data).ok() + pub fn from(account: &T) -> Option { + Self::deserialize(&account.data()).ok() } // utility function, used by Stakes, tests - pub fn to(versioned: &VoteStateVersions, account: &mut Account) -> Option<()> { - Self::serialize(versioned, &mut account.data).ok() + pub fn to(versioned: &VoteStateVersions, account: &mut T) -> Option<()> { + Self::serialize(versioned, &mut account.data_as_mut_slice()).ok() } pub fn deserialize(input: &[u8]) -> Result { @@ -248,7 +252,7 @@ impl VoteState { }) } - pub fn credits_from(account: &Account) -> Option { + pub fn credits_from(account: &T) -> Option { Self::from(account).map(|state| state.credits()) } @@ -747,8 +751,8 @@ pub fn create_account_with_authorized( authorized_withdrawer: &Pubkey, commission: u8, lamports: u64, -) -> Account { - let mut vote_account = Account::new(lamports, VoteState::size_of(), &id()); +) -> AccountSharedData { + let mut vote_account = AccountSharedData::new(lamports, VoteState::size_of(), &id()); let vote_state = 
VoteState::new( &VoteInit { @@ -772,7 +776,7 @@ pub fn create_account( node_pubkey: &Pubkey, commission: u8, lamports: u64, -) -> Account { +) -> AccountSharedData { create_account_with_authorized(node_pubkey, vote_pubkey, vote_pubkey, commission, lamports) } @@ -781,7 +785,7 @@ mod tests { use super::*; use crate::vote_state; use solana_sdk::{ - account::Account, + account::AccountSharedData, account_utils::StateMut, hash::hash, keyed_account::{get_signers, next_keyed_account}, @@ -807,11 +811,11 @@ mod tests { #[test] fn test_initialize_vote_account() { let vote_account_pubkey = solana_sdk::pubkey::new_rand(); - let vote_account = Account::new_ref(100, VoteState::size_of(), &id()); + let vote_account = AccountSharedData::new_ref(100, VoteState::size_of(), &id()); let vote_account = KeyedAccount::new(&vote_account_pubkey, false, &vote_account); let node_pubkey = solana_sdk::pubkey::new_rand(); - let node_account = RefCell::new(Account::default()); + let node_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[]; let signers: HashSet = get_signers(keyed_accounts); @@ -864,7 +868,7 @@ mod tests { assert_eq!(res, Err(InstructionError::AccountAlreadyInitialized)); //init should fail, account is too big - let large_vote_account = Account::new_ref(100, 2 * VoteState::size_of(), &id()); + let large_vote_account = AccountSharedData::new_ref(100, 2 * VoteState::size_of(), &id()); let large_vote_account = KeyedAccount::new(&vote_account_pubkey, false, &large_vote_account); let res = initialize_account( @@ -882,7 +886,7 @@ mod tests { assert_eq!(res, Err(InstructionError::InvalidAccountData)); } - fn create_test_account() -> (Pubkey, RefCell) { + fn create_test_account() -> (Pubkey, RefCell) { let vote_pubkey = solana_sdk::pubkey::new_rand(); ( vote_pubkey, @@ -895,7 +899,8 @@ mod tests { ) } - fn create_test_account_with_authorized() -> (Pubkey, Pubkey, Pubkey, RefCell) { + fn create_test_account_with_authorized() -> (Pubkey, Pubkey, Pubkey, 
RefCell) + { let vote_pubkey = solana_sdk::pubkey::new_rand(); let authorized_voter = solana_sdk::pubkey::new_rand(); let authorized_withdrawer = solana_sdk::pubkey::new_rand(); @@ -916,7 +921,7 @@ mod tests { fn simulate_process_vote( vote_pubkey: &Pubkey, - vote_account: &RefCell, + vote_account: &RefCell, vote: &Vote, slot_hashes: &[SlotHash], epoch: Epoch, @@ -940,7 +945,7 @@ mod tests { /// exercises all the keyed accounts stuff fn simulate_process_vote_unchecked( vote_pubkey: &Pubkey, - vote_account: &RefCell, + vote_account: &RefCell, vote: &Vote, ) -> Result { simulate_process_vote( @@ -1035,8 +1040,8 @@ mod tests { create_test_account_with_authorized(); let node_pubkey = solana_sdk::pubkey::new_rand(); - let node_account = RefCell::new(Account::default()); - let authorized_withdrawer_account = RefCell::new(Account::default()); + let node_account = RefCell::new(AccountSharedData::default()); + let authorized_withdrawer_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, true, &vote_account), @@ -1083,7 +1088,7 @@ mod tests { let (vote_pubkey, _authorized_voter, authorized_withdrawer, vote_account) = create_test_account_with_authorized(); - let authorized_withdrawer_account = RefCell::new(Account::default()); + let authorized_withdrawer_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, true, &vote_account), @@ -1207,7 +1212,7 @@ mod tests { assert_eq!(res, Err(VoteError::TooSoonToReauthorize.into())); // verify authorized_voter_pubkey can authorize authorized_voter_pubkey ;) - let authorized_voter_account = RefCell::new(Account::default()); + let authorized_voter_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, false, &vote_account), KeyedAccount::new(&authorized_voter_pubkey, true, &authorized_voter_account), @@ -1247,7 +1252,7 @@ mod tests { assert_eq!(res, Ok(())); // verify 
authorized_withdrawer can authorize authorized_withdrawer ;) - let withdrawer_account = RefCell::new(Account::default()); + let withdrawer_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, false, &vote_account), KeyedAccount::new(&authorized_withdrawer_pubkey, true, &withdrawer_account), @@ -1284,7 +1289,7 @@ mod tests { assert_eq!(res, Err(InstructionError::MissingRequiredSignature)); // signed by authorized voter - let authorized_voter_account = RefCell::new(Account::default()); + let authorized_voter_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, false, &vote_account), KeyedAccount::new(&authorized_voter_pubkey, true, &authorized_voter_account), @@ -1308,7 +1313,7 @@ mod tests { #[test] fn test_vote_without_initialization() { let vote_pubkey = solana_sdk::pubkey::new_rand(); - let vote_account = RefCell::new(Account::new(100, VoteState::size_of(), &id())); + let vote_account = RefCell::new(AccountSharedData::new(100, VoteState::size_of(), &id())); let res = simulate_process_vote_unchecked( &vote_pubkey, @@ -1646,7 +1651,7 @@ mod tests { &KeyedAccount::new( &solana_sdk::pubkey::new_rand(), false, - &RefCell::new(Account::default()), + &RefCell::new(AccountSharedData::default()), ), &signers, ); @@ -1661,14 +1666,14 @@ mod tests { &KeyedAccount::new( &solana_sdk::pubkey::new_rand(), false, - &RefCell::new(Account::default()), + &RefCell::new(AccountSharedData::default()), ), &signers, ); assert_eq!(res, Err(InstructionError::InsufficientFunds)); // all good - let to_account = RefCell::new(Account::default()); + let to_account = RefCell::new(AccountSharedData::default()); let lamports = vote_account.borrow().lamports; let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; let signers: HashSet = get_signers(keyed_accounts); @@ -1704,7 +1709,7 @@ mod tests { assert_eq!(res, Ok(())); // withdraw using authorized_withdrawer 
to authorized_withdrawer's account - let withdrawer_account = RefCell::new(Account::default()); + let withdrawer_account = RefCell::new(AccountSharedData::default()); let keyed_accounts = &[ KeyedAccount::new(&vote_pubkey, false, &vote_account), KeyedAccount::new(&authorized_withdrawer_pubkey, true, &withdrawer_account), diff --git a/ramp-tps/Cargo.toml b/ramp-tps/Cargo.toml index 9ac74f0ce0..cab7369b14 100644 --- a/ramp-tps/Cargo.toml +++ b/ramp-tps/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-ramp-tps" description = "Solana Tour de SOL - TPS ramp up" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/tour-de-sol" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,16 +13,16 @@ documentation = "https://docs.rs/solana-ramp-tps" bzip2 = "0.3.3" clap = "2.33.1" log = "0.4.11" -reqwest = { version = "0.10.8", default-features = false } -serde = "1.0.118" +reqwest = { version = "0.11.2", default-features = false } +serde = "1.0.122" serde_json = "1.0.56" serde_yaml = "0.8.13" -solana-core = { path = "../core", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-notifier = { path = "../notifier", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-notifier = { path = "../notifier", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version 
= "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } tar = "0.4.28" diff --git a/ramp-tps/src/stake.rs b/ramp-tps/src/stake.rs index e4ddc0dac9..d8d7fc170b 100644 --- a/ramp-tps/src/stake.rs +++ b/ramp-tps/src/stake.rs @@ -52,7 +52,7 @@ fn calculate_stake_warmup(mut stake_entry: StakeHistoryEntry, stake_config: &Sta fn stake_history_entry(epoch: Epoch, rpc_client: &RpcClient) -> Option { let stake_history_account = rpc_client.get_account(&stake_history::id()).ok()?; - let stake_history = from_account::(&stake_history_account)?; + let stake_history = from_account::(&stake_history_account)?; stake_history.get(&epoch).cloned() } diff --git a/rayon-threadlimit/Cargo.toml b/rayon-threadlimit/Cargo.toml index 2152f9f849..f9fa77841e 100644 --- a/rayon-threadlimit/Cargo.toml +++ b/rayon-threadlimit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-rayon-threadlimit" -version = "1.5.19" +version = "1.6.14" description = "solana-rayon-threadlimit" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-rayon-threadlimit" diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index 4cbbe8b809..e59b268f63 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-remote-wallet" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,15 +13,16 @@ documentation = "https://docs.rs/solana-remote-wallet" base32 = "0.4.0" console = "0.11.3" dialoguer = "0.6.2" -hidapi = { version = "1.2.3", default-features = false } +hidapi = { version = "1.2.5", default-features = false } log = "0.4.11" num-derive = { version = "0.3" } num-traits = { version = "0.2" } parking_lot = "0.10" +qstring = "0.7.2" semver = "0.9" -solana-sdk = { path = "../sdk", version = "=1.5.19" } +solana-sdk = { path = 
"../sdk", version = "=1.6.14" } thiserror = "1.0" -url = "2.1.1" +uriparse = "0.6.3" [features] default = ["linux-static-hidraw"] diff --git a/remote-wallet/src/ledger.rs b/remote-wallet/src/ledger.rs index 19343de1b8..1aa5d1c2e2 100644 --- a/remote-wallet/src/ledger.rs +++ b/remote-wallet/src/ledger.rs @@ -1,6 +1,7 @@ use { crate::{ ledger_error::LedgerError, + locator::Manufacturer, remote_wallet::{RemoteWallet, RemoteWalletError, RemoteWalletInfo, RemoteWalletManager}, }, console::Emoji, @@ -9,7 +10,7 @@ use { num_traits::FromPrimitive, semver::Version as FirmwareVersion, solana_sdk::{derivation_path::DerivationPath, pubkey::Pubkey, signature::Signature}, - std::{cmp::min, fmt, sync::Arc}, + std::{cmp::min, convert::TryFrom, fmt, sync::Arc}, }; static CHECK_MARK: Emoji = Emoji("✅ ", ""); @@ -34,8 +35,6 @@ const MAX_CHUNK_SIZE: usize = 255; const APDU_SUCCESS_CODE: usize = 0x9000; -const SOL_DERIVATION_PATH_BE: [u8; 8] = [0x80, 0, 0, 44, 0x80, 0, 0x01, 0xF5]; // 44'/501', Solana - /// Ledger vendor ID const LEDGER_VID: u16 = 0x2c97; /// Ledger product IDs: Nano S and Nano X @@ -365,21 +364,14 @@ impl RemoteWallet for LedgerWallet { ) -> Result { let manufacturer = dev_info .manufacturer_string() - .clone() - .unwrap_or("Unknown") - .to_lowercase() - .replace(" ", "-"); + .and_then(|s| Manufacturer::try_from(s).ok()) + .unwrap_or_default(); let model = dev_info .product_string() - .clone() .unwrap_or("Unknown") .to_lowercase() .replace(" ", "-"); - let serial = dev_info - .serial_number() - .clone() - .unwrap_or("Unknown") - .to_string(); + let serial = dev_info.serial_number().unwrap_or("Unknown").to_string(); let host_device_path = dev_info.path().to_string_lossy().to_string(); let version = self.get_firmware_version()?; self.version = version; @@ -519,20 +511,16 @@ pub fn is_valid_ledger(vendor_id: u16, product_id: u16) -> bool { /// Build the derivation path byte array from a DerivationPath selection fn extend_and_serialize(derivation_path: &DerivationPath) -> 
Vec { - let byte = if derivation_path.change.is_some() { + let byte = if derivation_path.change().is_some() { 4 - } else if derivation_path.account.is_some() { + } else if derivation_path.account().is_some() { 3 } else { 2 }; let mut concat_derivation = vec![byte]; - concat_derivation.extend_from_slice(&SOL_DERIVATION_PATH_BE); - if let Some(account) = &derivation_path.account { - concat_derivation.extend_from_slice(&account.as_u32().to_be_bytes()); - if let Some(change) = &derivation_path.change { - concat_derivation.extend_from_slice(&change.as_u32().to_be_bytes()); - } + for index in derivation_path.path() { + concat_derivation.extend_from_slice(&index.to_bits().to_be_bytes()); } concat_derivation } diff --git a/remote-wallet/src/lib.rs b/remote-wallet/src/lib.rs index ee2e2232a0..2e58e2a2f2 100644 --- a/remote-wallet/src/lib.rs +++ b/remote-wallet/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::integer_arithmetic)] pub mod ledger; pub mod ledger_error; +pub mod locator; pub mod remote_keypair; pub mod remote_wallet; diff --git a/remote-wallet/src/locator.rs b/remote-wallet/src/locator.rs new file mode 100644 index 0000000000..1bb65f5a57 --- /dev/null +++ b/remote-wallet/src/locator.rs @@ -0,0 +1,403 @@ +use { + solana_sdk::pubkey::{ParsePubkeyError, Pubkey}, + std::{ + convert::{Infallible, TryFrom, TryInto}, + str::FromStr, + }, + thiserror::Error, + uriparse::{URIReference, URIReferenceBuilder, URIReferenceError}, +}; + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Manufacturer { + Unknown, + Ledger, +} + +impl Default for Manufacturer { + fn default() -> Self { + Self::Unknown + } +} + +const MANUFACTURER_UNKNOWN: &str = "unknown"; +const MANUFACTURER_LEDGER: &str = "ledger"; + +#[derive(Clone, Debug, Error, PartialEq)] +#[error("not a manufacturer")] +pub struct ManufacturerError; + +impl From for ManufacturerError { + fn from(_: Infallible) -> Self { + ManufacturerError + } +} + +impl FromStr for Manufacturer { + type Err = ManufacturerError; + fn 
from_str(s: &str) -> Result { + let s = s.to_ascii_lowercase(); + match s.as_str() { + MANUFACTURER_LEDGER => Ok(Self::Ledger), + _ => Err(ManufacturerError), + } + } +} + +impl TryFrom<&str> for Manufacturer { + type Error = ManufacturerError; + fn try_from(s: &str) -> Result { + Manufacturer::from_str(s) + } +} + +impl AsRef for Manufacturer { + fn as_ref(&self) -> &str { + match self { + Self::Unknown => MANUFACTURER_UNKNOWN, + Self::Ledger => MANUFACTURER_LEDGER, + } + } +} + +impl std::fmt::Display for Manufacturer { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s: &str = self.as_ref(); + write!(f, "{}", s) + } +} + +#[derive(Clone, Debug, Error, PartialEq)] +pub enum LocatorError { + #[error(transparent)] + ManufacturerError(#[from] ManufacturerError), + #[error(transparent)] + PubkeyError(#[from] ParsePubkeyError), + #[error(transparent)] + UriReferenceError(#[from] URIReferenceError), + #[error("unimplemented scheme")] + UnimplementedScheme, + #[error("infallible")] + Infallible, +} + +impl From for LocatorError { + fn from(_: Infallible) -> Self { + Self::Infallible + } +} + +#[derive(Debug, PartialEq)] +pub struct Locator { + pub manufacturer: Manufacturer, + pub pubkey: Option, +} + +impl std::fmt::Display for Locator { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let maybe_path = self.pubkey.map(|p| p.to_string()); + let path = maybe_path.as_deref().unwrap_or("/"); + + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(self.manufacturer.as_ref())) + .unwrap() + .try_path(path) + .unwrap(); + + let uri = builder.build().unwrap(); + write!(f, "{}", uri) + } +} + +impl Locator { + pub fn new_from_path>(path: P) -> Result { + let path = path.as_ref(); + let uri = URIReference::try_from(path)?; + Self::new_from_uri(&uri) + } + + pub fn new_from_uri(uri: &URIReference<'_>) -> Result { + let scheme = uri.scheme().map(|s| 
s.as_str().to_ascii_lowercase()); + let host = uri.host().map(|h| h.to_string()); + match (scheme, host) { + (Some(scheme), Some(host)) if scheme == "usb" => { + let path = uri.path().segments().get(0).and_then(|s| { + if !s.is_empty() { + Some(s.as_str()) + } else { + None + } + }); + Self::new_from_parts(host.as_str(), path) + } + (Some(_scheme), Some(_host)) => Err(LocatorError::UnimplementedScheme), + (None, Some(_host)) => Err(LocatorError::UnimplementedScheme), + (_, None) => Err(LocatorError::ManufacturerError(ManufacturerError)), + } + } + + pub fn new_from_parts( + manufacturer: V, + pubkey: Option

, + ) -> Result + where + VE: Into, + V: TryInto, + PE: Into, + P: TryInto, + { + let manufacturer = manufacturer.try_into().map_err(|e| e.into())?; + let pubkey = if let Some(pubkey) = pubkey { + Some(pubkey.try_into().map_err(|e| e.into())?) + } else { + None + }; + Ok(Self { + manufacturer, + pubkey, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_manufacturer() { + assert_eq!(MANUFACTURER_LEDGER.try_into(), Ok(Manufacturer::Ledger)); + assert!( + matches!(Manufacturer::from_str(MANUFACTURER_LEDGER), Ok(v) if v == Manufacturer::Ledger) + ); + assert_eq!(Manufacturer::Ledger.as_ref(), MANUFACTURER_LEDGER); + + assert!( + matches!(Manufacturer::from_str("bad-manufacturer"), Err(e) if e == ManufacturerError) + ); + } + + #[test] + fn test_locator_new_from_parts() { + let manufacturer = Manufacturer::Ledger; + let manufacturer_str = "ledger"; + let pubkey = Pubkey::new_unique(); + let pubkey_str = pubkey.to_string(); + + let expect = Locator { + manufacturer, + pubkey: None, + }; + assert!(matches!( + Locator::new_from_parts(manufacturer, None::), + Ok(e) if e == expect, + )); + assert!(matches!( + Locator::new_from_parts(manufacturer_str, None::), + Ok(e) if e == expect, + )); + + let expect = Locator { + manufacturer, + pubkey: Some(pubkey), + }; + assert!(matches!( + Locator::new_from_parts(manufacturer, Some(pubkey)), + Ok(e) if e == expect, + )); + assert!(matches!( + Locator::new_from_parts(manufacturer_str, Some(pubkey_str.as_str())), + Ok(e) if e == expect, + )); + + assert!(matches!( + Locator::new_from_parts("bad-manufacturer", None::), + Err(LocatorError::ManufacturerError(e)) if e == ManufacturerError, + )); + assert!(matches!( + Locator::new_from_parts(manufacturer, Some("bad-pubkey")), + Err(LocatorError::PubkeyError(e)) if e == ParsePubkeyError::Invalid, + )); + } + + #[test] + fn test_locator_new_from_uri() { + let manufacturer = Manufacturer::Ledger; + let pubkey = Pubkey::new_unique(); + let pubkey_str = 
pubkey.to_string(); + + // usb://ledger/{PUBKEY}?key=0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path(pubkey_str.as_str()) + .unwrap() + .try_query(Some("key=0/0")) + .unwrap(); + let uri = builder.build().unwrap(); + let expect = Locator { + manufacturer, + pubkey: Some(pubkey), + }; + assert_eq!(Locator::new_from_uri(&uri), Ok(expect)); + + // usb://ledger/{PUBKEY} + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path(pubkey_str.as_str()) + .unwrap(); + let uri = builder.build().unwrap(); + let expect = Locator { + manufacturer, + pubkey: Some(pubkey), + }; + assert_eq!(Locator::new_from_uri(&uri), Ok(expect)); + + // usb://ledger + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path("") + .unwrap(); + let uri = builder.build().unwrap(); + let expect = Locator { + manufacturer, + pubkey: None, + }; + assert_eq!(Locator::new_from_uri(&uri), Ok(expect)); + + // usb://ledger/ + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path("/") + .unwrap(); + let uri = builder.build().unwrap(); + let expect = Locator { + manufacturer, + pubkey: None, + }; + assert_eq!(Locator::new_from_uri(&uri), Ok(expect)); + + // bad-scheme://ledger + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("bad-scheme")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path("") + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + Locator::new_from_uri(&uri), + Err(LocatorError::UnimplementedScheme) + ); + + // usb://bad-manufacturer + let 
mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some("bad-manufacturer")) + .unwrap() + .try_path("") + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + Locator::new_from_uri(&uri), + Err(LocatorError::ManufacturerError(ManufacturerError)) + ); + + // usb://ledger/bad-pubkey + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("usb")) + .unwrap() + .try_authority(Some(Manufacturer::Ledger.as_ref())) + .unwrap() + .try_path("bad-pubkey") + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + Locator::new_from_uri(&uri), + Err(LocatorError::PubkeyError(ParsePubkeyError::Invalid)) + ); + } + + #[test] + fn test_locator_new_from_path() { + let manufacturer = Manufacturer::Ledger; + let pubkey = Pubkey::new_unique(); + let path = format!("usb://ledger/{}?key=0/0", pubkey); + Locator::new_from_path(path).unwrap(); + + // usb://ledger/{PUBKEY}?key=0'/0' + let path = format!("usb://ledger/{}?key=0'/0'", pubkey); + let expect = Locator { + manufacturer, + pubkey: Some(pubkey), + }; + assert_eq!(Locator::new_from_path(path), Ok(expect)); + + // usb://ledger/{PUBKEY} + let path = format!("usb://ledger/{}", pubkey); + let expect = Locator { + manufacturer, + pubkey: Some(pubkey), + }; + assert_eq!(Locator::new_from_path(path), Ok(expect)); + + // usb://ledger + let path = "usb://ledger"; + let expect = Locator { + manufacturer, + pubkey: None, + }; + assert_eq!(Locator::new_from_path(path), Ok(expect)); + + // usb://ledger/ + let path = "usb://ledger/"; + let expect = Locator { + manufacturer, + pubkey: None, + }; + assert_eq!(Locator::new_from_path(path), Ok(expect)); + + // bad-scheme://ledger + let path = "bad-scheme://ledger"; + assert_eq!( + Locator::new_from_path(path), + Err(LocatorError::UnimplementedScheme) + ); + + // usb://bad-manufacturer + let path = "usb://bad-manufacturer"; + assert_eq!( + Locator::new_from_path(path), + 
Err(LocatorError::ManufacturerError(ManufacturerError)) + ); + + // usb://ledger/bad-pubkey + let path = "usb://ledger/bad-pubkey"; + assert_eq!( + Locator::new_from_path(path), + Err(LocatorError::PubkeyError(ParsePubkeyError::Invalid)) + ); + } +} diff --git a/remote-wallet/src/remote_keypair.rs b/remote-wallet/src/remote_keypair.rs index 9028b7a6ce..28ccbe83c4 100644 --- a/remote-wallet/src/remote_keypair.rs +++ b/remote-wallet/src/remote_keypair.rs @@ -1,6 +1,7 @@ use { crate::{ ledger::get_ledger_from_info, + locator::{Locator, Manufacturer}, remote_wallet::{ RemoteWallet, RemoteWalletError, RemoteWalletInfo, RemoteWalletManager, RemoteWalletType, @@ -55,13 +56,14 @@ impl Signer for RemoteKeypair { } pub fn generate_remote_keypair( - path: String, + locator: Locator, + derivation_path: DerivationPath, wallet_manager: &RemoteWalletManager, confirm_key: bool, keypair_name: &str, ) -> Result { - let (remote_wallet_info, derivation_path) = RemoteWalletInfo::parse_path(path)?; - if remote_wallet_info.manufacturer == "ledger" { + let remote_wallet_info = RemoteWalletInfo::parse_locator(locator); + if remote_wallet_info.manufacturer == Manufacturer::Ledger { let ledger = get_ledger_from_info(remote_wallet_info, keypair_name, wallet_manager)?; let path = format!("{}{}", ledger.pretty_path, derivation_path.get_query()); Ok(RemoteKeypair::new( diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs index a676e51bf4..afa5abc2f5 100644 --- a/remote-wallet/src/remote_wallet.rs +++ b/remote-wallet/src/remote_wallet.rs @@ -2,21 +2,20 @@ use { crate::{ ledger::{is_valid_ledger, LedgerWallet}, ledger_error::LedgerError, + locator::{Locator, LocatorError, Manufacturer}, }, log::*, parking_lot::{Mutex, RwLock}, solana_sdk::{ - derivation_path::{DerivationPath, DerivationPathComponent, DerivationPathError}, + derivation_path::{DerivationPath, DerivationPathError}, pubkey::Pubkey, signature::{Signature, SignerError}, }, std::{ - str::FromStr, 
sync::Arc, time::{Duration, Instant}, }, thiserror::Error, - url::Url, }; const HID_GLOBAL_USAGE_PAGE: u16 = 0xFF00; @@ -57,6 +56,9 @@ pub enum RemoteWalletError { #[error("remote wallet operation rejected by the user")] UserCancel, + + #[error(transparent)] + LocatorError(#[from] LocatorError), } impl From for RemoteWalletError { @@ -211,7 +213,7 @@ pub trait RemoteWallet { confirm_key: bool, ) -> Result; - /// Sign transaction data with wallet managing pubkey at derivation path m/44'/5655640'/'/'. + /// Sign transaction data with wallet managing pubkey at derivation path m/44'/501'/'/'. fn sign_message( &self, derivation_path: &DerivationPath, @@ -239,7 +241,7 @@ pub struct RemoteWalletInfo { /// RemoteWallet device model pub model: String, /// RemoteWallet device manufacturer - pub manufacturer: String, + pub manufacturer: Manufacturer, /// RemoteWallet device serial number pub serial: String, /// RemoteWallet host device path @@ -251,80 +253,12 @@ pub struct RemoteWalletInfo { } impl RemoteWalletInfo { - pub fn parse_path(path: String) -> Result<(Self, DerivationPath), RemoteWalletError> { - let wallet_path = Url::parse(&path).map_err(|e| { - Into::::into(DerivationPathError::InvalidDerivationPath(format!( - "parse error: {:?}", - e - ))) - })?; - - if wallet_path.host_str().is_none() { - return Err(DerivationPathError::InvalidDerivationPath( - "missing remote wallet type".to_string(), - ) - .into()); - } - - let mut wallet_info = RemoteWalletInfo { - manufacturer: wallet_path.host_str().unwrap().to_string(), + pub fn parse_locator(locator: Locator) -> Self { + RemoteWalletInfo { + manufacturer: locator.manufacturer, + pubkey: locator.pubkey.unwrap_or_default(), ..RemoteWalletInfo::default() - }; - - if let Some(wallet_id) = wallet_path.path_segments().map(|c| c.collect::>()) { - if !wallet_id[0].is_empty() { - wallet_info.pubkey = Pubkey::from_str(wallet_id[0]).map_err(|e| { - Into::::into(DerivationPathError::InvalidDerivationPath( - format!("pubkey from_str 
error: {:?}", e), - )) - })?; - } - } - - let mut derivation_path = DerivationPath::default(); - let mut query_pairs = wallet_path.query_pairs(); - if query_pairs.count() > 0 { - for _ in 0..query_pairs.count() { - if let Some(mut pair) = query_pairs.next() { - if pair.0 == "key" { - let key_path = pair.1.to_mut(); - let _key_path = key_path.clone(); - if key_path.ends_with('/') { - key_path.pop(); - } - let mut parts = key_path.split('/'); - if let Some(account) = parts.next() { - derivation_path.account = - Some(DerivationPathComponent::from_str(account)?); - } - if let Some(change) = parts.next() { - derivation_path.change = - Some(DerivationPathComponent::from_str(change)?); - } - if parts.next().is_some() { - return Err(DerivationPathError::InvalidDerivationPath(format!( - "key path `{}` too deep, only / supported", - _key_path - )) - .into()); - } - } else { - return Err(DerivationPathError::InvalidDerivationPath(format!( - "invalid query string `{}={}`, only `key` supported", - pair.0, pair.1 - )) - .into()); - } - } - if query_pairs.next().is_some() { - return Err(DerivationPathError::InvalidDerivationPath( - "invalid query string, extra fields not supported".to_string(), - ) - .into()); - } - } } - Ok((wallet_info, derivation_path)) } pub fn get_pretty_path(&self) -> String { @@ -366,151 +300,43 @@ mod tests { use super::*; #[test] - fn test_parse_path() { + fn test_parse_locator() { let pubkey = solana_sdk::pubkey::new_rand(); - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path(format!("usb://ledger/{:?}?key=1/2", pubkey)).unwrap(); - assert!(wallet_info.matches(&RemoteWalletInfo { - model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), - serial: "".to_string(), - host_device_path: "/host/device/path".to_string(), - pubkey, - error: None, - })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - } - ); - let (wallet_info, derivation_path) = - 
RemoteWalletInfo::parse_path(format!("usb://ledger/{:?}?key=1'/2'", pubkey)).unwrap(); - assert!(wallet_info.matches(&RemoteWalletInfo { - model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), - serial: "".to_string(), - host_device_path: "/host/device/path".to_string(), - pubkey, - error: None, - })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - } - ); - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path(format!("usb://ledger/{:?}?key=1\'/2\'", pubkey)).unwrap(); - assert!(wallet_info.matches(&RemoteWalletInfo { - model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), - serial: "".to_string(), - host_device_path: "/host/device/path".to_string(), - pubkey, - error: None, - })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - } - ); - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path(format!("usb://ledger/{:?}?key=1/2/", pubkey)).unwrap(); - assert!(wallet_info.matches(&RemoteWalletInfo { - model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), - serial: "".to_string(), - host_device_path: "/host/device/path".to_string(), - pubkey, - error: None, - })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - } - ); - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path(format!("usb://ledger/{:?}?key=1/", pubkey)).unwrap(); + let locator = Locator { + manufacturer: Manufacturer::Ledger, + pubkey: Some(pubkey), + }; + let wallet_info = RemoteWalletInfo::parse_locator(locator); assert!(wallet_info.matches(&RemoteWalletInfo { model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), + manufacturer: Manufacturer::Ledger, serial: "".to_string(), host_device_path: "/host/device/path".to_string(), pubkey, error: None, })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: 
None, - } - ); - // Test that wallet id need not be complete for key derivation to work - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path("usb://ledger?key=1".to_string()).unwrap(); + // Test that pubkey need not be populated + let locator = Locator { + manufacturer: Manufacturer::Ledger, + pubkey: None, + }; + let wallet_info = RemoteWalletInfo::parse_locator(locator); assert!(wallet_info.matches(&RemoteWalletInfo { model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), + manufacturer: Manufacturer::Ledger, serial: "".to_string(), host_device_path: "/host/device/path".to_string(), pubkey: Pubkey::default(), error: None, })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: None, - } - ); - let (wallet_info, derivation_path) = - RemoteWalletInfo::parse_path("usb://ledger/?key=1/2".to_string()).unwrap(); - assert!(wallet_info.matches(&RemoteWalletInfo { - model: "".to_string(), - manufacturer: "ledger".to_string(), - serial: "".to_string(), - host_device_path: "/host/device/path".to_string(), - pubkey: Pubkey::default(), - error: None, - })); - assert_eq!( - derivation_path, - DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - } - ); - - // Failure cases - assert!( - RemoteWalletInfo::parse_path("usb://ledger/bad-pubkey?key=1/2".to_string()).is_err() - ); - assert!(RemoteWalletInfo::parse_path("usb://?key=1/2".to_string()).is_err()); - assert!(RemoteWalletInfo::parse_path("usb:/ledger?key=1/2".to_string()).is_err()); - assert!(RemoteWalletInfo::parse_path("ledger?key=1/2".to_string()).is_err()); - assert!(RemoteWalletInfo::parse_path("usb://ledger?key=1/2/3".to_string()).is_err()); - // Other query strings cause an error - assert!( - RemoteWalletInfo::parse_path("usb://ledger/?key=1/2&test=other".to_string()).is_err() - ); - assert!(RemoteWalletInfo::parse_path("usb://ledger/?Key=1/2".to_string()).is_err()); - 
assert!(RemoteWalletInfo::parse_path("usb://ledger/?test=other".to_string()).is_err()); } #[test] fn test_remote_wallet_info_matches() { let pubkey = solana_sdk::pubkey::new_rand(); let info = RemoteWalletInfo { - manufacturer: "Ledger".to_string(), + manufacturer: Manufacturer::Ledger, model: "Nano S".to_string(), serial: "0001".to_string(), host_device_path: "/host/device/path".to_string(), @@ -518,11 +344,11 @@ mod tests { error: None, }; let mut test_info = RemoteWalletInfo { - manufacturer: "Not Ledger".to_string(), + manufacturer: Manufacturer::Unknown, ..RemoteWalletInfo::default() }; assert!(!info.matches(&test_info)); - test_info.manufacturer = "Ledger".to_string(); + test_info.manufacturer = Manufacturer::Ledger; assert!(info.matches(&test_info)); test_info.model = "Other".to_string(); assert!(info.matches(&test_info)); @@ -543,7 +369,7 @@ mod tests { let pubkey_str = pubkey.to_string(); let remote_wallet_info = RemoteWalletInfo { model: "nano-s".to_string(), - manufacturer: "ledger".to_string(), + manufacturer: Manufacturer::Ledger, serial: "".to_string(), host_device_path: "/host/device/path".to_string(), pubkey, diff --git a/run.sh b/run.sh index 477877a8f6..2f63877313 100755 --- a/run.sh +++ b/run.sh @@ -73,9 +73,9 @@ else --hashes-per-tick sleep \ --faucet-lamports 500000000000000000 \ --bootstrap-validator \ - "$dataDir"/validator-identity.json \ - "$dataDir"/validator-vote-account.json \ - "$dataDir"/validator-stake-account.json \ + "$validator_identity" \ + "$validator_vote_account" \ + "$validator_stake_account" \ --ledger "$ledgerDir" \ --cluster-type "$SOLANA_RUN_SH_CLUSTER_TYPE" \ $SPL_GENESIS_ARGS \ @@ -104,14 +104,14 @@ args=( --rpc-port 8899 --rpc-faucet-address 127.0.0.1:9900 --log - - --enable-rpc-exit --enable-rpc-transaction-history --enable-cpi-and-log-storage --init-complete-file "$dataDir"/init-completed - --snapshot-compression gzip + --snapshot-compression none --accounts-db-caching-enabled --snapshot-interval-slots 100 
--require-tower + --no-wait-for-vote-to-start-leader --account-index program-id --account-index spl-token-owner --account-index spl-token-mint diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 86d0358687..075ae9adf3 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-runtime" -version = "1.5.19" +version = "1.6.14" description = "Solana runtime" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -21,7 +21,6 @@ crossbeam-channel = "0.4" dir-diff = "0.3.2" flate2 = "1.0.14" fnv = "1.0.7" -fs_extra = "1.2.0" itertools = "0.9.0" lazy_static = "1.4.0" libc = "0.2.81" @@ -33,30 +32,31 @@ num-traits = { version = "0.2" } num_cpus = "1.13.0" ouroboros = "0.5.1" rand = "0.7.0" -rayon = "1.4.1" +rayon = "1.5.0" regex = "1.3.9" -serde = { version = "1.0.118", features = ["rc"] } +serde = { version = "1.0.122", features = ["rc"] } serde_derive = "1.0.103" -solana-config-program = { path = "../programs/config", version = "=1.5.19" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } -solana-secp256k1-program = { path = "../programs/secp256k1", version = "=1.5.19" } -velas-account-program = { path = "../programs/velas-account-program" } -solana-evm-loader-program = { path = "../evm-utils/programs/evm_loader" } +solana-config-program = { path = "../programs/config", version = "=1.6.14" } +solana-frozen-abi = { path = 
"../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } +solana-secp256k1-program = { path = "../programs/secp256k1", version = "=1.6.14" } symlink = "0.1.0" tar = "0.4.28" tempfile = "3.1.0" thiserror = "1.0" zstd = "0.5.1" + evm-state = { path = "../evm-utils/evm-state" } evm-rpc = { path = "../evm-utils/evm-rpc" } +solana-evm-loader-program = { path = "../evm-utils/programs/evm_loader" } +velas-account-program = { path = "../programs/velas-account-program" } [lib] @@ -65,8 +65,7 @@ name = "solana_runtime" [dev-dependencies] assert_matches = "1.3.0" -solana-noop-program = { path = "../programs/noop", version = "=1.5.19" } -solana-sleep-program = { path = "../programs/sleep", version = "1.5.14" } +solana-noop-program = { path = "../programs/noop", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index e1a207c7d6..5fd759c590 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -8,10 +8,11 @@ use rand::Rng; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use solana_runtime::{ accounts::{create_test_accounts, AccountAddressFilter, Accounts}, + accounts_index::AccountSecondaryIndexes, bank::*, }; use solana_sdk::{ - account::Account, + account::AccountSharedData, genesis_config::{create_genesis_config, ClusterType}, hash::Hash, pubkey::Pubkey, @@ -22,13 +23,13 @@ use std::{ sync::{Arc, 
RwLock}, thread::Builder, }; -use tempfile::TempDir; use test::Bencher; fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) { for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((t + 1) as u64, 0, &Account::default().owner); + let account = + AccountSharedData::new((t + 1) as u64, 0, &AccountSharedData::default().owner); pubkeys.push(pubkey); assert!(bank.get_account(&pubkey).is_none()); bank.deposit(&pubkey, (t + 1) as u64); @@ -47,15 +48,13 @@ fn bench_has_duplicates(bencher: &mut Bencher) { #[bench] fn test_accounts_create(bencher: &mut Bencher) { let (genesis_config, _) = create_genesis_config(10_000); - let evm_state_dir = TempDir::new().unwrap(); let bank0 = Bank::new_with_paths( &genesis_config, - Some((evm_state_dir.as_ref(), evm_state_dir.as_ref())), vec![PathBuf::from("bench_a0")], &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); bencher.iter(|| { @@ -68,15 +67,13 @@ fn test_accounts_create(bencher: &mut Bencher) { fn test_accounts_squash(bencher: &mut Bencher) { let (mut genesis_config, _) = create_genesis_config(100_000); genesis_config.rent.burn_percent = 100; // Avoid triggering an assert in Bank::distribute_rent_to_validators() - let evm_state_dir = TempDir::new().unwrap(); let mut prev_bank = Arc::new(Bank::new_with_paths( &genesis_config, - Some((evm_state_dir.as_ref(), evm_state_dir.as_ref())), vec![PathBuf::from("bench_a1")], &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, )); let mut pubkeys: Vec = vec![]; @@ -101,7 +98,7 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { let accounts = Accounts::new_with_config( vec![PathBuf::from("bench_accounts_hash_internal")], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); let mut pubkeys: Vec = vec![]; @@ -119,7 +116,7 @@ fn test_update_accounts_hash(bencher: &mut Bencher) { let accounts = Accounts::new_with_config( 
vec![PathBuf::from("update_accounts_hash")], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); let mut pubkeys: Vec = vec![]; @@ -136,7 +133,7 @@ fn test_accounts_delta_hash(bencher: &mut Bencher) { let accounts = Accounts::new_with_config( vec![PathBuf::from("accounts_delta_hash")], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); let mut pubkeys: Vec = vec![]; @@ -152,14 +149,15 @@ fn bench_delete_dependencies(bencher: &mut Bencher) { let accounts = Accounts::new_with_config( vec![PathBuf::from("accounts_delete_deps")], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); let mut old_pubkey = Pubkey::default(); - let zero_account = Account::new(0, 0, &Account::default().owner); + let zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); for i in 0..1000 { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((i + 1) as u64, 0, &Account::default().owner); + let account = + AccountSharedData::new((i + 1) as u64, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(i, &pubkey, &account); accounts.store_slow_uncached(i, &old_pubkey, &zero_account); old_pubkey = pubkey; @@ -184,7 +182,7 @@ fn store_accounts_with_possible_contention( .join(bench_name), ], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, )); let num_keys = 1000; @@ -194,7 +192,7 @@ fn store_accounts_with_possible_contention( (0..num_keys) .map(|_| { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(slot, &pubkey, &account); pubkey }) @@ -214,7 +212,7 @@ fn store_accounts_with_possible_contention( let num_new_keys = 1000; let new_accounts: Vec<_> = (0..num_new_keys) - .map(|_| Account::new(1, 0, 
&Account::default().owner)) + .map(|_| AccountSharedData::new(1, 0, &AccountSharedData::default().owner)) .collect(); bencher.iter(|| { for account in &new_accounts { @@ -246,7 +244,9 @@ fn bench_concurrent_read_write(bencher: &mut Bencher) { #[ignore] fn bench_concurrent_scan_write(bencher: &mut Bencher) { store_accounts_with_possible_contention("concurrent_scan_write", bencher, |accounts, _| loop { - test::black_box(accounts.load_by_program(&HashMap::new(), &Account::default().owner)); + test::black_box( + accounts.load_by_program(&HashMap::new(), &AccountSharedData::default().owner), + ); }) } @@ -300,14 +300,14 @@ fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) { }) } -fn setup_bench_dashmap_iter() -> (Arc, DashMap) { +fn setup_bench_dashmap_iter() -> (Arc, DashMap) { let accounts = Arc::new(Accounts::new_with_config( vec![ PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string())) .join("bench_dashmap_par_iter"), ], &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, )); @@ -319,7 +319,7 @@ fn setup_bench_dashmap_iter() -> (Arc, DashMap, -} - #[derive(Debug, Default, AbiExample)] pub struct AccountLocks { write_locks: HashSet, @@ -88,12 +84,6 @@ impl AccountLocks { /// This structure handles synchronization for db #[derive(Default, Debug, AbiExample)] pub struct Accounts { - /// my slot - pub slot: Slot, - - /// my epoch - pub epoch: Epoch, - /// Single global AccountsDb pub accounts_db: Arc, @@ -103,20 +93,20 @@ pub struct Accounts { } // for the load instructions -pub type TransactionAccounts = Vec; -pub type TransactionAccountDeps = Vec<(Pubkey, Account)>; +pub type TransactionAccounts = Vec; +pub type TransactionAccountDeps = Vec<(Pubkey, AccountSharedData)>; pub type TransactionRent = u64; -pub type TransactionLoaders = Vec>; - -pub type TransactionLoadResult = ( - Result<( - TransactionAccounts, - TransactionAccountDeps, - TransactionLoaders, - TransactionRent, - )>, - 
Option, -); +pub type TransactionLoaders = Vec>; +#[derive(PartialEq, Debug, Clone)] +pub struct LoadedTransaction { + pub accounts: TransactionAccounts, + pub account_deps: TransactionAccountDeps, + pub loaders: TransactionLoaders, + pub rent: TransactionRent, + pub rent_debits: RentDebits, +} + +pub type TransactionLoadResult = (Result, Option); pub enum AccountAddressFilter { Exclude, // exclude all addresses matching the filter @@ -125,13 +115,18 @@ pub enum AccountAddressFilter { impl Accounts { pub fn new(paths: Vec, cluster_type: &ClusterType) -> Self { - Self::new_with_config(paths, cluster_type, HashSet::new(), false) + Self::new_with_config( + paths, + cluster_type, + AccountSecondaryIndexes::default(), + false, + ) } pub fn new_with_config( paths: Vec, cluster_type: &ClusterType, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, caching_enabled: bool, ) -> Self { Self { @@ -142,16 +137,13 @@ impl Accounts { caching_enabled, )), account_locks: Mutex::new(AccountLocks::default()), - ..Self::default() } } - pub fn new_from_parent(parent: &Accounts, slot: Slot, parent_slot: Slot, epoch: Epoch) -> Self { + pub fn new_from_parent(parent: &Accounts, slot: Slot, parent_slot: Slot) -> Self { let accounts_db = parent.accounts_db.clone(); accounts_db.set_hash(slot, parent_slot); Self { - slot, - epoch, accounts_db, account_locks: Mutex::new(AccountLocks::default()), } @@ -161,7 +153,6 @@ impl Accounts { Self { accounts_db: Arc::new(accounts_db), account_locks: Mutex::new(AccountLocks::default()), - ..Self::default() } } @@ -178,18 +169,20 @@ impl Accounts { false } - fn construct_instructions_account(message: &Message) -> Account { - let mut account = Account { - data: message.serialize_instructions(), - ..Account::default() - }; - + fn construct_instructions_account( + message: &Message, + demote_sysvar_write_locks: bool, + ) -> AccountSharedData { + let mut data = message.serialize_instructions(demote_sysvar_write_locks); // add room for 
current instruction index. - account.data.resize(account.data.len() + 2, 0); - account + data.resize(data.len() + 2, 0); + AccountSharedData::from(Account { + data, + ..Account::default() + }) } - fn load_tx_accounts( + fn load_transaction( &self, ancestors: &Ancestors, tx: &Transaction, @@ -197,7 +190,7 @@ impl Accounts { error_counters: &mut ErrorCounters, rent_collector: &RentCollector, feature_set: &FeatureSet, - ) -> Result<(TransactionAccounts, TransactionAccountDeps, TransactionRent)> { + ) -> Result { // Copy all the accounts let message = tx.message(); if tx.signatures.is_empty() && fee != 0 { @@ -209,6 +202,9 @@ impl Accounts { let mut tx_rent: TransactionRent = 0; let mut accounts = Vec::with_capacity(message.account_keys.len()); let mut account_deps = Vec::with_capacity(message.account_keys.len()); + let demote_sysvar_write_locks = + feature_set.is_active(&feature_set::demote_sysvar_write_locks::id()); + let mut rent_debits = RentDebits::default(); for (i, key) in message.account_keys.iter().enumerate() { let account = if message.is_non_loader_key(key, i) { @@ -219,16 +215,16 @@ impl Accounts { if solana_sdk::sysvar::instructions::check_id(key) && feature_set.is_active(&feature_set::instructions_sysvar_enabled::id()) { - if message.is_writable(i) { + if message.is_writable(i, demote_sysvar_write_locks) { return Err(TransactionError::InvalidAccountIndex); } - Self::construct_instructions_account(message) + Self::construct_instructions_account(message, demote_sysvar_write_locks) } else { let (account, rent) = self .accounts_db .load(ancestors, key) .map(|(mut account, _)| { - if message.is_writable(i) { + if message.is_writable(i, demote_sysvar_write_locks) { let rent_due = rent_collector .collect_from_existing_account(&key, &mut account); (account, rent_due) @@ -261,11 +257,13 @@ impl Accounts { } tx_rent += rent; + rent_debits.push(key, rent, account.lamports); + account } } else { // Fill in an empty account for the program slots. 
- Account::default() + AccountSharedData::default() }; accounts.push(account); } @@ -297,7 +295,31 @@ impl Accounts { Err(TransactionError::InsufficientFundsForFee) } else { accounts[payer_index].lamports -= fee; - Ok((accounts, account_deps, tx_rent)) + + let message = tx.message(); + let loaders = message + .instructions + .iter() + .map(|ix| { + if message.account_keys.len() <= ix.program_id_index as usize { + error_counters.account_not_found += 1; + return Err(TransactionError::AccountNotFound); + } + let program_id = message.account_keys[ix.program_id_index as usize]; + self.load_executable_accounts( + ancestors, + &program_id, + error_counters, + ) + }) + .collect::>()?; + Ok(LoadedTransaction { + accounts, + account_deps, + loaders, + rent: tx_rent, + rent_debits, + }) } } } else { @@ -312,7 +334,7 @@ impl Accounts { ancestors: &Ancestors, program_id: &Pubkey, error_counters: &mut ErrorCounters, - ) -> Result> { + ) -> Result> { let mut accounts = Vec::new(); let mut depth = 0; let mut program_id = *program_id; @@ -375,28 +397,6 @@ impl Accounts { Ok(accounts) } - /// For each program_id in the transaction, load its loaders. 
- fn load_loaders( - &self, - ancestors: &Ancestors, - tx: &Transaction, - error_counters: &mut ErrorCounters, - ) -> Result { - let message = tx.message(); - message - .instructions - .iter() - .map(|ix| { - if message.account_keys.len() <= ix.program_id_index as usize { - error_counters.account_not_found += 1; - return Err(TransactionError::AccountNotFound); - } - let program_id = message.account_keys[ix.program_id_index as usize]; - self.load_executable_accounts(ancestors, &program_id, error_counters) - }) - .collect() - } - pub fn load_accounts<'a>( &self, ancestors: &Ancestors, @@ -428,22 +428,15 @@ impl Accounts { return (Err(TransactionError::BlockhashNotFound), None); }; - let load_res = self.load_tx_accounts( + let loaded_transaction = match self.load_transaction( ancestors, tx, fee, error_counters, rent_collector, feature_set, - ); - let (accounts, account_deps, rents) = match load_res { - Ok((a, d, r)) => (a, d, r), - Err(e) => return (Err(e), None), - }; - - let load_res = self.load_loaders(ancestors, tx, error_counters); - let loaders = match load_res { - Ok(loaders) => loaders, + ) { + Ok(loaded_transaction) => loaded_transaction, Err(e) => return (Err(e), None), }; @@ -452,7 +445,7 @@ impl Accounts { match NonceRollbackFull::from_partial( nonce_rollback, tx.message(), - &accounts, + &loaded_transaction.accounts, ) { Ok(nonce_rollback) => Some(nonce_rollback), Err(e) => return (Err(e), None), @@ -461,7 +454,7 @@ impl Accounts { None }; - (Ok((accounts, account_deps, loaders, rents)), nonce_rollback) + (Ok(loaded_transaction), nonce_rollback) } (_, (Err(e), _nonce_rollback)) => (Err(e), None), }) @@ -469,11 +462,12 @@ impl Accounts { } /// Slow because lock is held for 1 operation instead of many - pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> { - let (account, slot) = self - .accounts_db - .load_slow(ancestors, pubkey) - .unwrap_or((Account::default(), self.slot)); + pub fn load_slow( + &self, + ancestors: 
&Ancestors, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { + let (account, slot) = self.accounts_db.load_slow(ancestors, pubkey)?; if account.lamports > 0 { Some((account, slot)) @@ -536,7 +530,7 @@ impl Accounts { &self, slot: Slot, program_id: Option<&Pubkey>, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.scan_slot(slot, |stored_account| { let hit = match program_id { None => true, @@ -642,9 +636,9 @@ impl Accounts { lamports > 0 } - fn load_while_filtering bool>( - collector: &mut Vec<(Pubkey, Account)>, - some_account_tuple: Option<(&Pubkey, Account, Slot)>, + fn load_while_filtering bool>( + collector: &mut Vec<(Pubkey, AccountSharedData)>, + some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>, filter: F, ) { if let Some(mapped_account_tuple) = some_account_tuple @@ -659,10 +653,10 @@ impl Accounts { &self, ancestors: &Ancestors, program_id: &Pubkey, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.scan_accounts( ancestors, - |collector: &mut Vec<(Pubkey, Account)>, some_account_tuple| { + |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { Self::load_while_filtering(collector, some_account_tuple, |account| { account.owner == *program_id }) @@ -670,15 +664,15 @@ impl Accounts { ) } - pub fn load_by_program_with_filter bool>( + pub fn load_by_program_with_filter bool>( &self, ancestors: &Ancestors, program_id: &Pubkey, filter: F, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.scan_accounts( ancestors, - |collector: &mut Vec<(Pubkey, Account)>, some_account_tuple| { + |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { Self::load_while_filtering(collector, some_account_tuple, |account| { account.owner == *program_id && filter(account) }) @@ -686,25 +680,33 @@ impl Accounts { ) } - pub fn load_by_index_key_with_filter bool>( + pub fn load_by_index_key_with_filter bool>( &self, 
ancestors: &Ancestors, index_key: &IndexKey, filter: F, - ) -> Vec<(Pubkey, Account)> { - self.accounts_db.index_scan_accounts( - ancestors, - *index_key, - |collector: &mut Vec<(Pubkey, Account)>, some_account_tuple| { - Self::load_while_filtering(collector, some_account_tuple, |account| filter(account)) - }, - ) + ) -> Vec<(Pubkey, AccountSharedData)> { + self.accounts_db + .index_scan_accounts( + ancestors, + *index_key, + |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { + Self::load_while_filtering(collector, some_account_tuple, |account| { + filter(account) + }) + }, + ) + .0 + } + + pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool { + self.accounts_db.account_indexes.include_key(key) } - pub fn load_all(&self, ancestors: &Ancestors) -> Vec<(Pubkey, Account, Slot)> { + pub fn load_all(&self, ancestors: &Ancestors) -> Vec<(Pubkey, AccountSharedData, Slot)> { self.accounts_db.scan_accounts( ancestors, - |collector: &mut Vec<(Pubkey, Account, Slot)>, some_account_tuple| { + |collector: &mut Vec<(Pubkey, AccountSharedData, Slot)>, some_account_tuple| { if let Some((pubkey, account, slot)) = some_account_tuple.filter(|(_, account, _)| Self::is_loadable(account.lamports)) { @@ -718,12 +720,12 @@ impl Accounts { &self, ancestors: &Ancestors, range: R, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.range_scan_accounts( "load_to_collect_rent_eagerly_scan_elapsed", ancestors, range, - |collector: &mut Vec<(Pubkey, Account)>, option| { + |collector: &mut Vec<(Pubkey, AccountSharedData)>, option| { Self::load_while_filtering(collector, option, |_| true) }, ) @@ -732,11 +734,11 @@ impl Accounts { /// Slow because lock is held for 1 operation instead of many. 
/// WARNING: This noncached version is only to be used for tests/benchmarking /// as bypassing the cache in general is not supported - pub fn store_slow_uncached(&self, slot: Slot, pubkey: &Pubkey, account: &Account) { + pub fn store_slow_uncached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) { self.accounts_db.store_uncached(slot, &[(pubkey, account)]); } - pub fn store_slow_cached(&self, slot: Slot, pubkey: &Pubkey, account: &Account) { + pub fn store_slow_cached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) { self.accounts_db.store_cached(slot, &[(pubkey, account)]); } @@ -772,13 +774,21 @@ impl Accounts { Ok(()) } - fn unlock_account(&self, tx: &Transaction, result: &Result<()>, locks: &mut AccountLocks) { + fn unlock_account( + &self, + tx: &Transaction, + result: &Result<()>, + locks: &mut AccountLocks, + demote_sysvar_write_locks: bool, + ) { match result { Err(TransactionError::AccountInUse) => (), Err(TransactionError::SanitizeFailure) => (), Err(TransactionError::AccountLoadedTwice) => (), _ => { - let (writable_keys, readonly_keys) = &tx.message().get_account_keys_by_lock_type(); + let (writable_keys, readonly_keys) = &tx + .message() + .get_account_keys_by_lock_type(demote_sysvar_write_locks); for k in writable_keys { locks.unlock_write(k); } @@ -807,7 +817,11 @@ impl Accounts { /// This function will prevent multiple threads from modifying the same account state at the /// same time #[must_use] - pub fn lock_accounts<'a>(&self, txs: impl Iterator) -> Vec> { + pub fn lock_accounts<'a>( + &self, + txs: impl Iterator, + demote_sysvar_write_locks: bool, + ) -> Vec> { use solana_sdk::sanitize::Sanitize; let keys: Vec> = txs .map(|tx| { @@ -817,7 +831,9 @@ impl Accounts { return Err(TransactionError::AccountLoadedTwice); } - Ok(tx.message().get_account_keys_by_lock_type()) + Ok(tx + .message() + .get_account_keys_by_lock_type(demote_sysvar_write_locks)) }) .collect(); let mut account_locks = &mut 
self.account_locks.lock().unwrap(); @@ -836,11 +852,17 @@ impl Accounts { &self, txs: impl Iterator, results: &[Result<()>], + demote_sysvar_write_locks: bool, ) { let mut account_locks = self.account_locks.lock().unwrap(); debug!("bank unlock accounts"); for (tx, lock_result) in txs.zip(results) { - self.unlock_account(tx, lock_result, &mut account_locks); + self.unlock_account( + tx, + lock_result, + &mut account_locks, + demote_sysvar_write_locks, + ); } } @@ -856,6 +878,7 @@ impl Accounts { rent_collector: &RentCollector, last_blockhash_with_fee_calculator: &(Hash, FeeCalculator), fix_recent_blockhashes_sysvar_delay: bool, + demote_sysvar_write_locks: bool, ) { let accounts_to_store = self.collect_accounts_to_store( txs, @@ -864,6 +887,7 @@ impl Accounts { rent_collector, last_blockhash_with_fee_calculator, fix_recent_blockhashes_sysvar_delay, + demote_sysvar_write_locks, ); self.accounts_db.store_cached(slot, &accounts_to_store); } @@ -887,7 +911,8 @@ impl Accounts { rent_collector: &RentCollector, last_blockhash_with_fee_calculator: &(Hash, FeeCalculator), fix_recent_blockhashes_sysvar_delay: bool, - ) -> Vec<(&'a Pubkey, &'a Account)> { + demote_sysvar_write_locks: bool, + ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> { let mut accounts = Vec::with_capacity(loaded.len()); for (i, ((raccs, _nonce_rollback), tx)) in loaded.iter_mut().zip(txs).enumerate() { if raccs.is_err() { @@ -912,13 +937,13 @@ impl Accounts { }; let message = &tx.message(); - let acc = raccs.as_mut().unwrap(); + let loaded_transaction = raccs.as_mut().unwrap(); let mut fee_payer_index = None; for ((i, key), account) in message .account_keys .iter() .enumerate() - .zip(acc.0.iter_mut()) + .zip(loaded_transaction.accounts.iter_mut()) .filter(|((i, key), _account)| message.is_non_loader_key(key, *i)) { let is_nonce_account = prepare_if_nonce_account( @@ -933,7 +958,7 @@ impl Accounts { fee_payer_index = Some(i); } let is_fee_payer = Some(i) == fee_payer_index; - if message.is_writable(i) + if 
message.is_writable(i, demote_sysvar_write_locks) && (res.is_ok() || (maybe_nonce_rollback.is_some() && (is_nonce_account || is_fee_payer))) { @@ -951,7 +976,11 @@ impl Accounts { } } if account.rent_epoch == INITIAL_RENT_EPOCH { - acc.3 += rent_collector.collect_from_created_account(&key, account); + let rent = rent_collector.collect_from_created_account(&key, account); + loaded_transaction.rent += rent; + loaded_transaction + .rent_debits + .push(key, rent, account.lamports); } accounts.push((key, &*account)); } @@ -962,10 +991,10 @@ impl Accounts { } pub fn prepare_if_nonce_account( - account: &mut Account, + account: &mut AccountSharedData, account_pubkey: &Pubkey, tx_result: &Result<()>, - maybe_nonce_rollback: Option<(&Pubkey, &Account, Option<&Account>)>, + maybe_nonce_rollback: Option<(&Pubkey, &AccountSharedData, Option<&AccountSharedData>)>, last_blockhash_with_fee_calculator: &(Hash, FeeCalculator), fix_recent_blockhashes_sysvar_delay: bool, ) -> bool { @@ -1011,7 +1040,8 @@ pub fn create_test_accounts( ) { for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((t + 1) as u64, 0, &Account::default().owner); + let account = + AccountSharedData::new((t + 1) as u64, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(slot, &pubkey, &account); pubkeys.push(pubkey); } @@ -1022,19 +1052,17 @@ pub fn create_test_accounts( pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) { for pubkey in pubkeys { let amount = thread_rng().gen_range(0, 10); - let account = Account::new(amount, 0, &Account::default().owner); + let account = AccountSharedData::new(amount, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(slot, &pubkey, &account); } } #[cfg(test)] mod tests { - // TODO: all the bank tests are bank specific, issue: 2194 - use super::*; use crate::rent_collector::RentCollector; use solana_sdk::{ - account::Account, + account::{AccountSharedData, WritableAccount}, 
epoch_schedule::EpochSchedule, fee_calculator::FeeCalculator, genesis_config::ClusterType, @@ -1053,15 +1081,19 @@ mod tests { fn load_accounts_with_fee_and_rent( tx: Transaction, - ka: &[(Pubkey, Account)], + ka: &[(Pubkey, AccountSharedData)], fee_calculator: &FeeCalculator, rent_collector: &RentCollector, error_counters: &mut ErrorCounters, ) -> Vec { let mut hash_queue = BlockhashQueue::new(100); hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); for ka in ka.iter() { accounts.store_slow_uncached(0, &ka.0, &ka.1); } @@ -1080,7 +1112,7 @@ mod tests { fn load_accounts_with_fee( tx: Transaction, - ka: &[(Pubkey, Account)], + ka: &[(Pubkey, AccountSharedData)], fee_calculator: &FeeCalculator, error_counters: &mut ErrorCounters, ) -> Vec { @@ -1090,7 +1122,7 @@ mod tests { fn load_accounts( tx: Transaction, - ka: &[(Pubkey, Account)], + ka: &[(Pubkey, AccountSharedData)], error_counters: &mut ErrorCounters, ) -> Vec { let fee_calculator = FeeCalculator::default(); @@ -1099,7 +1131,7 @@ mod tests { #[test] fn test_load_accounts_no_key() { - let accounts: Vec<(Pubkey, Account)> = Vec::new(); + let accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let instructions = vec![CompiledInstruction::new(0, &(), vec![0])]; @@ -1123,7 +1155,7 @@ mod tests { #[test] fn test_load_accounts_no_account_0_exists() { - let accounts: Vec<(Pubkey, Account)> = Vec::new(); + let accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); @@ -1149,17 +1181,17 @@ mod tests { #[test] fn test_load_accounts_unknown_program_id() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut 
accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); let key1 = Pubkey::new(&[5u8; 32]); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); - let account = Account::new(2, 1, &Pubkey::default()); + let account = AccountSharedData::new(2, 1, &Pubkey::default()); accounts.push((key1, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -1183,13 +1215,13 @@ mod tests { #[test] fn test_load_accounts_insufficient_funds() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -1217,13 +1249,13 @@ mod tests { #[test] fn test_load_accounts_invalid_account_for_fee() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); - let account = Account::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program + let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program accounts.push((key0, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -1262,7 +1294,7 @@ mod tests { let nonce = Keypair::new(); let mut accounts = vec![( nonce.pubkey(), - Account::new_data( + AccountSharedData::new_data( min_balance * 2, 
&nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), @@ -1290,8 +1322,8 @@ mod tests { ); assert_eq!(loaded_accounts.len(), 1); let (load_res, _nonce_rollback) = &loaded_accounts[0]; - let (tx_accounts, _account_deps, _loaders, _rents) = load_res.as_ref().unwrap(); - assert_eq!(tx_accounts[0].lamports, min_balance); + let loaded_transaction = load_res.as_ref().unwrap(); + assert_eq!(loaded_transaction.accounts[0].lamports, min_balance); // Fee leaves zero balance fails accounts[0].1.lamports = min_balance; @@ -1322,18 +1354,18 @@ mod tests { #[test] fn test_load_accounts_no_loaders() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); let key1 = Pubkey::new(&[5u8; 32]); - let mut account = Account::new(1, 0, &Pubkey::default()); + let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); account.rent_epoch = 1; accounts.push((key0, account)); - let mut account = Account::new(2, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(2, 1, &Pubkey::default()); account.rent_epoch = 1; accounts.push((key1, account)); @@ -1351,19 +1383,11 @@ mod tests { assert_eq!(error_counters.account_not_found, 0); assert_eq!(loaded_accounts.len(), 1); match &loaded_accounts[0] { - ( - Ok(( - transaction_accounts, - _transaction_account_deps, - transaction_loaders, - _transaction_rents, - )), - _nonce_rollback, - ) => { - assert_eq!(transaction_accounts.len(), 3); - assert_eq!(transaction_accounts[0], accounts[0].1); - assert_eq!(transaction_loaders.len(), 1); - assert_eq!(transaction_loaders[0].len(), 0); + (Ok(loaded_transaction), _nonce_rollback) => { + assert_eq!(loaded_transaction.accounts.len(), 3); + assert_eq!(loaded_transaction.accounts[0], accounts[0].1); + assert_eq!(loaded_transaction.loaders.len(), 1); + 
assert_eq!(loaded_transaction.loaders[0].len(), 0); } (Err(e), _nonce_rollback) => Err(e).unwrap(), } @@ -1371,7 +1395,7 @@ mod tests { #[test] fn test_load_accounts_max_call_depth() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); @@ -1383,35 +1407,35 @@ mod tests { let key5 = Pubkey::new(&[9u8; 32]); let key6 = Pubkey::new(&[10u8; 32]); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); - let mut account = Account::new(40, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); account.executable = true; account.owner = native_loader::id(); accounts.push((key1, account)); - let mut account = Account::new(41, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(41, 1, &Pubkey::default()); account.executable = true; account.owner = key1; accounts.push((key2, account)); - let mut account = Account::new(42, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(42, 1, &Pubkey::default()); account.executable = true; account.owner = key2; accounts.push((key3, account)); - let mut account = Account::new(43, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(43, 1, &Pubkey::default()); account.executable = true; account.owner = key3; accounts.push((key4, account)); - let mut account = Account::new(44, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(44, 1, &Pubkey::default()); account.executable = true; account.owner = key4; accounts.push((key5, account)); - let mut account = Account::new(45, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(45, 1, &Pubkey::default()); account.executable = true; account.owner = key5; accounts.push((key6, account)); @@ -1437,17 +1461,17 @@ mod tests { #[test] fn 
test_load_accounts_bad_program_id() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); let key1 = Pubkey::new(&[5u8; 32]); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); - let mut account = Account::new(40, 1, &native_loader::id()); + let mut account = AccountSharedData::new(40, 1, &native_loader::id()); account.executable = true; accounts.push((key1, account)); @@ -1472,17 +1496,17 @@ mod tests { #[test] fn test_load_accounts_bad_owner() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); let key1 = Pubkey::new(&[5u8; 32]); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); - let mut account = Account::new(40, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); account.executable = true; accounts.push((key1, account)); @@ -1507,17 +1531,17 @@ mod tests { #[test] fn test_load_accounts_not_executable() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); let key0 = keypair.pubkey(); let key1 = Pubkey::new(&[5u8; 32]); - let account = Account::new(1, 0, &Pubkey::default()); + let account = AccountSharedData::new(1, 0, &Pubkey::default()); accounts.push((key0, account)); - let account = Account::new(40, 1, &native_loader::id()); + let account = AccountSharedData::new(40, 1, &native_loader::id()); 
accounts.push((key1, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -1541,7 +1565,7 @@ mod tests { #[test] fn test_load_accounts_multiple_loaders() { - let mut accounts: Vec<(Pubkey, Account)> = Vec::new(); + let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new(); let mut error_counters = ErrorCounters::default(); let keypair = Keypair::new(); @@ -1549,17 +1573,17 @@ mod tests { let key1 = Pubkey::new(&[5u8; 32]); let key2 = Pubkey::new(&[6u8; 32]); - let mut account = Account::new(1, 0, &Pubkey::default()); + let mut account = AccountSharedData::new(1, 0, &Pubkey::default()); account.rent_epoch = 1; accounts.push((key0, account)); - let mut account = Account::new(40, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); account.executable = true; account.rent_epoch = 1; account.owner = native_loader::id(); accounts.push((key1, account)); - let mut account = Account::new(41, 1, &Pubkey::default()); + let mut account = AccountSharedData::new(41, 1, &Pubkey::default()); account.executable = true; account.rent_epoch = 1; account.owner = key1; @@ -1582,21 +1606,13 @@ mod tests { assert_eq!(error_counters.account_not_found, 0); assert_eq!(loaded_accounts.len(), 1); match &loaded_accounts[0] { - ( - Ok(( - transaction_accounts, - _transaction_account_deps, - transaction_loaders, - _transaction_rents, - )), - _nonce_rollback, - ) => { - assert_eq!(transaction_accounts.len(), 3); - assert_eq!(transaction_accounts[0], accounts[0].1); - assert_eq!(transaction_loaders.len(), 2); - assert_eq!(transaction_loaders[0].len(), 1); - assert_eq!(transaction_loaders[1].len(), 2); - for loaders in transaction_loaders.iter() { + (Ok(loaded_transaction), _nonce_rollback) => { + assert_eq!(loaded_transaction.accounts.len(), 3); + assert_eq!(loaded_transaction.accounts[0], accounts[0].1); + assert_eq!(loaded_transaction.loaders.len(), 2); + assert_eq!(loaded_transaction.loaders[0].len(), 1); + 
assert_eq!(loaded_transaction.loaders[1].len(), 2); + for loaders in loaded_transaction.loaders.iter() { for (i, accounts_subset) in loaders.iter().enumerate() { // +1 to skip first not loader account assert_eq!(*accounts_subset, accounts[i + 1]); @@ -1609,18 +1625,22 @@ mod tests { #[test] fn test_load_by_program_slot() { - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); // Load accounts owned by various programs into AccountsDb let pubkey0 = solana_sdk::pubkey::new_rand(); - let account0 = Account::new(1, 0, &Pubkey::new(&[2; 32])); + let account0 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32])); accounts.store_slow_uncached(0, &pubkey0, &account0); let pubkey1 = solana_sdk::pubkey::new_rand(); - let account1 = Account::new(1, 0, &Pubkey::new(&[2; 32])); + let account1 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32])); accounts.store_slow_uncached(0, &pubkey1, &account1); let pubkey2 = solana_sdk::pubkey::new_rand(); - let account2 = Account::new(1, 0, &Pubkey::new(&[3; 32])); + let account2 = AccountSharedData::new(1, 0, &Pubkey::new(&[3; 32])); accounts.store_slow_uncached(0, &pubkey2, &account2); let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[2; 32]))); @@ -1633,8 +1653,12 @@ mod tests { #[test] fn test_accounts_account_not_found() { - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let mut error_counters = ErrorCounters::default(); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -1652,8 +1676,12 @@ mod tests { #[test] #[should_panic] fn test_accounts_empty_bank_hash() { - let accounts = - Accounts::new_with_config(Vec::new(), 
&ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); accounts.bank_hash_at(1); } @@ -1664,13 +1692,17 @@ mod tests { let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); - let account0 = Account::new(1, 0, &Pubkey::default()); - let account1 = Account::new(2, 0, &Pubkey::default()); - let account2 = Account::new(3, 0, &Pubkey::default()); - let account3 = Account::new(4, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); + let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); + let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0); accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1); accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); @@ -1686,7 +1718,10 @@ mod tests { instructions, ); let tx = Transaction::new(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx.clone()].iter()); + let results0 = accounts.lock_accounts( + [tx.clone()].iter(), + true, // demote_sysvar_write_locks + ); assert!(results0[0].is_ok()); assert_eq!( @@ -1721,7 +1756,10 @@ mod tests { ); let tx1 = Transaction::new(&[&keypair1], message, Hash::default()); let txs = vec![tx0, tx1]; - let results1 = accounts.lock_accounts(txs.iter()); + let results1 = accounts.lock_accounts( + txs.iter(), + true, // demote_sysvar_write_locks + ); assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times assert!(results1[1].is_err()); // 
Read-only account (keypair1) cannot also be locked as writable @@ -1736,8 +1774,16 @@ mod tests { 2 ); - accounts.unlock_accounts([tx].iter(), &results0); - accounts.unlock_accounts(txs.iter(), &results1); + accounts.unlock_accounts( + [tx].iter(), + &results0, + true, // demote_sysvar_write_locks + ); + accounts.unlock_accounts( + txs.iter(), + &results1, + true, // demote_sysvar_write_locks + ); let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -1748,7 +1794,10 @@ mod tests { instructions, ); let tx = Transaction::new(&[&keypair1], message, Hash::default()); - let results2 = accounts.lock_accounts([tx].iter()); + let results2 = accounts.lock_accounts( + [tx].iter(), + true, // demote_sysvar_write_locks + ); assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable // Check that read-only lock with zero references is deleted @@ -1770,12 +1819,16 @@ mod tests { let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); - let account0 = Account::new(1, 0, &Pubkey::default()); - let account1 = Account::new(2, 0, &Pubkey::default()); - let account2 = Account::new(3, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); + let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0); accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1); accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); @@ -1812,13 +1865,20 @@ mod tests { let exit_clone = exit_clone.clone(); loop { let txs = vec![writable_tx.clone()]; - let results = 
accounts_clone.clone().lock_accounts(txs.iter()); + let results = accounts_clone.clone().lock_accounts( + txs.iter(), + true, // demote_sysvar_write_locks + ); for result in results.iter() { if result.is_ok() { counter_clone.clone().fetch_add(1, Ordering::SeqCst); } } - accounts_clone.unlock_accounts(txs.iter(), &results); + accounts_clone.unlock_accounts( + txs.iter(), + &results, + true, // demote_sysvar_write_locks + ); if exit_clone.clone().load(Ordering::Relaxed) { break; } @@ -1827,13 +1887,20 @@ mod tests { let counter_clone = counter; for _ in 0..5 { let txs = vec![readonly_tx.clone()]; - let results = accounts_arc.clone().lock_accounts(txs.iter()); + let results = accounts_arc.clone().lock_accounts( + txs.iter(), + true, // demote_sysvar_write_locks + ); if results[0].is_ok() { let counter_value = counter_clone.clone().load(Ordering::SeqCst); thread::sleep(time::Duration::from_millis(50)); assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst)); } - accounts_arc.unlock_accounts(txs.iter(), &results); + accounts_arc.unlock_accounts( + txs.iter(), + &results, + true, // demote_sysvar_write_locks + ); thread::sleep(time::Duration::from_millis(50)); } exit.store(true, Ordering::Relaxed); @@ -1872,20 +1939,21 @@ mod tests { let loaders = vec![(Ok(()), None), (Ok(()), None)]; - let account0 = Account::new(1, 0, &Pubkey::default()); - let account1 = Account::new(2, 0, &Pubkey::default()); - let account2 = Account::new(3, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); + let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let transaction_accounts0 = vec![account0, account2.clone()]; let transaction_loaders0 = vec![]; let transaction_rent0 = 0; let loaded0 = ( - Ok(( - transaction_accounts0, - vec![], - transaction_loaders0, - transaction_rent0, - )), + Ok(LoadedTransaction { + accounts: transaction_accounts0, + account_deps: 
vec![], + loaders: transaction_loaders0, + rent: transaction_rent0, + rent_debits: RentDebits::default(), + }), None, ); @@ -1893,19 +1961,24 @@ mod tests { let transaction_loaders1 = vec![]; let transaction_rent1 = 0; let loaded1 = ( - Ok(( - transaction_accounts1, - vec![], - transaction_loaders1, - transaction_rent1, - )), + Ok(LoadedTransaction { + accounts: transaction_accounts1, + account_deps: vec![], + loaders: transaction_loaders1, + rent: transaction_rent1, + rent_debits: RentDebits::default(), + }), None, ); let mut loaded = vec![loaded0, loaded1]; - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); { accounts .account_locks @@ -1916,10 +1989,11 @@ mod tests { let collected_accounts = accounts.collect_accounts_to_store( txs.iter(), &loaders, - &mut loaded, + loaded.as_mut_slice(), &rent_collector, &(Hash::default(), FeeCalculator::default()), true, + true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts @@ -1951,14 +2025,19 @@ mod tests { #[test] fn huge_clean() { solana_logger::setup(); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let mut old_pubkey = Pubkey::default(); - let zero_account = Account::new(0, 0, &Account::default().owner); + let zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); info!("storing.."); for i in 0..2_000 { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((i + 1) as u64, 0, &Account::default().owner); + let account = + AccountSharedData::new((i + 1) as u64, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(i, 
&pubkey, &account); accounts.store_slow_uncached(i, &old_pubkey, &zero_account); old_pubkey = pubkey; @@ -1993,8 +2072,12 @@ mod tests { #[test] fn test_instructions() { solana_logger::setup(); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let instructions_key = solana_sdk::sysvar::instructions::id(); let keypair = Keypair::new(); @@ -2014,20 +2097,18 @@ mod tests { fn create_accounts_prepare_if_nonce_account() -> ( Pubkey, - Account, - Account, + AccountSharedData, + AccountSharedData, Hash, FeeCalculator, - Option, + Option, ) { let data = nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), )); - let account = Account::new_data(42, &data, &system_program::id()).unwrap(); - let pre_account = Account { - lamports: 43, - ..account.clone() - }; + let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap(); + let mut pre_account = account.clone(); + pre_account.set_lamports(43); ( Pubkey::default(), pre_account, @@ -2041,12 +2122,12 @@ mod tests { } fn run_prepare_if_nonce_account_test( - account: &mut Account, + account: &mut AccountSharedData, account_pubkey: &Pubkey, tx_result: &Result<()>, - maybe_nonce_rollback: Option<(&Pubkey, &Account, Option<&Account>)>, + maybe_nonce_rollback: Option<(&Pubkey, &AccountSharedData, Option<&AccountSharedData>)>, last_blockhash_with_fee_calculator: &(Hash, FeeCalculator), - expect_account: &Account, + expect_account: &AccountSharedData, ) -> bool { // Verify expect_account's relationship match maybe_nonce_rollback { @@ -2118,7 +2199,7 @@ mod tests { ) = create_accounts_prepare_if_nonce_account(); let post_account_pubkey = pre_account_pubkey; - let mut post_account = Account::default(); + let mut post_account = AccountSharedData::default(); let expect_account = 
post_account.clone(); assert!(run_prepare_if_nonce_account_test( &mut post_account, @@ -2222,8 +2303,9 @@ mod tests { blockhash, fee_calculator: FeeCalculator::default(), })); - let nonce_account_pre = Account::new_data(42, &nonce_state, &system_program::id()).unwrap(); - let from_account_pre = Account::new(4242, 0, &Pubkey::default()); + let nonce_account_pre = + AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); + let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); let nonce_rollback = Some(NonceRollbackFull::new( nonce_address, @@ -2245,12 +2327,12 @@ mod tests { fee_calculator: FeeCalculator::default(), })); let nonce_account_post = - Account::new_data(43, &nonce_state, &system_program::id()).unwrap(); + AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); - let from_account_post = Account::new(4199, 0, &Pubkey::default()); - let to_account = Account::new(2, 0, &Pubkey::default()); - let nonce_authority_account = Account::new(3, 0, &Pubkey::default()); - let recent_blockhashes_sysvar_account = Account::new(4, 0, &Pubkey::default()); + let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); + let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); + let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); let transaction_accounts = vec![ from_account_post, @@ -2262,27 +2344,33 @@ mod tests { let transaction_loaders = vec![]; let transaction_rent = 0; let loaded = ( - Ok(( - transaction_accounts, - vec![], - transaction_loaders, - transaction_rent, - )), + Ok(LoadedTransaction { + accounts: transaction_accounts, + account_deps: vec![], + loaders: transaction_loaders, + rent: transaction_rent, + rent_debits: RentDebits::default(), + }), nonce_rollback, ); let mut loaded = vec![loaded]; let next_blockhash = Hash::new_unique(); - let accounts = 
- Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let collected_accounts = accounts.collect_accounts_to_store( txs.iter(), &loaders, - &mut loaded, + loaded.as_mut_slice(), &rent_collector, &(next_blockhash, FeeCalculator::default()), true, + true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 2); assert_eq!( @@ -2332,7 +2420,8 @@ mod tests { blockhash, fee_calculator: FeeCalculator::default(), })); - let nonce_account_pre = Account::new_data(42, &nonce_state, &system_program::id()).unwrap(); + let nonce_account_pre = + AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); let nonce_rollback = Some(NonceRollbackFull::new( nonce_address, @@ -2354,12 +2443,12 @@ mod tests { fee_calculator: FeeCalculator::default(), })); let nonce_account_post = - Account::new_data(43, &nonce_state, &system_program::id()).unwrap(); + AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); - let from_account_post = Account::new(4200, 0, &Pubkey::default()); - let to_account = Account::new(2, 0, &Pubkey::default()); - let nonce_authority_account = Account::new(3, 0, &Pubkey::default()); - let recent_blockhashes_sysvar_account = Account::new(4, 0, &Pubkey::default()); + let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); + let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); + let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); let transaction_accounts = vec![ from_account_post, @@ -2371,27 +2460,33 @@ mod tests { let transaction_loaders = vec![]; let transaction_rent = 0; let loaded = ( - Ok(( - transaction_accounts, - vec![], - transaction_loaders, - transaction_rent, - )), + 
Ok(LoadedTransaction { + accounts: transaction_accounts, + account_deps: vec![], + loaders: transaction_loaders, + rent: transaction_rent, + rent_debits: RentDebits::default(), + }), nonce_rollback, ); let mut loaded = vec![loaded]; let next_blockhash = Hash::new_unique(); - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let collected_accounts = accounts.collect_accounts_to_store( txs.iter(), &loaders, - &mut loaded, + loaded.as_mut_slice(), &rent_collector, &(next_blockhash, FeeCalculator::default()), true, + true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts @@ -2409,17 +2504,21 @@ mod tests { #[test] fn test_load_largest_accounts() { - let accounts = - Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let pubkey0 = Pubkey::new_unique(); - let account0 = Account::new(42, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey0, &account0); let pubkey1 = Pubkey::new_unique(); - let account1 = Account::new(42, 0, &Pubkey::default()); + let account1 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey1, &account1); let pubkey2 = Pubkey::new_unique(); - let account2 = Account::new(41, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(41, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey2, &account2); let ancestors = vec![(0, 0)].into_iter().collect(); diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 34c33db5df..4522f8003b 
100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -12,7 +12,7 @@ use crossbeam_channel::{Receiver, SendError, Sender}; use log::*; use rand::{thread_rng, Rng}; use solana_measure::measure::Measure; -use solana_sdk::clock::Slot; +use solana_sdk::{clock::Slot, hash::Hash}; use std::{ boxed::Box, fmt::{Debug, Formatter}, @@ -99,6 +99,14 @@ impl SnapshotRequestHandler { status_cache_slot_deltas, } = snapshot_request; + let previous_hash = if test_hash_calculation { + // We have to use the index version here. + // We cannot calculate the non-index way because cache has not been flushed and stores don't match reality. + snapshot_root_bank.update_accounts_hash_with_index_option(true, false) + } else { + Hash::default() + }; + let mut shrink_time = Measure::start("shrink_time"); if !accounts_db_caching_enabled { snapshot_root_bank @@ -129,11 +137,12 @@ impl SnapshotRequestHandler { flush_accounts_cache_time.stop(); let mut hash_time = Measure::start("hash_time"); - snapshot_root_bank.update_accounts_hash_with_index_option( + let this_hash = snapshot_root_bank.update_accounts_hash_with_index_option( use_index_hash_calculation, test_hash_calculation, ); let hash_for_testing = if test_hash_calculation { + assert_eq!(previous_hash, this_hash); Some(snapshot_root_bank.get_accounts_hash()) } else { None @@ -345,7 +354,7 @@ impl AccountsBackgroundService { } else { // under sustained writes, shrink can lag behind so cap to // SHRUNKEN_ACCOUNT_PER_INTERVAL (which is based on INTERVAL_MS, - // which in turn roughly asscociated block time) + // which in turn roughly associated block time) consumed_budget = bank .process_stale_slot_with_budget( consumed_budget, @@ -415,7 +424,7 @@ mod test { use super::*; use crate::genesis_utils::create_genesis_config; use crossbeam_channel::unbounded; - use solana_sdk::{account::Account, pubkey::Pubkey}; + use solana_sdk::{account::AccountSharedData, pubkey::Pubkey}; #[test] fn 
test_accounts_background_service_remove_dead_slots() { @@ -429,7 +438,10 @@ mod test { // Store an account in slot 0 let account_key = Pubkey::new_unique(); - bank0.store_account(&account_key, &Account::new(264, 0, &Pubkey::default())); + bank0.store_account( + &account_key, + &AccountSharedData::new(264, 0, &Pubkey::default()), + ); assert!(bank0.get_account(&account_key).is_some()); pruned_banks_sender.send(0).unwrap(); AccountsBackgroundService::remove_dead_slots(&bank0, &request_handler, &mut 0, &mut 0); diff --git a/runtime/src/accounts_cache.rs b/runtime/src/accounts_cache.rs index ce70a3c677..73bdb50191 100644 --- a/runtime/src/accounts_cache.rs +++ b/runtime/src/accounts_cache.rs @@ -1,5 +1,10 @@ use dashmap::DashMap; -use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey}; +use solana_sdk::{ + account::{AccountSharedData, ReadableAccount}, + clock::Slot, + hash::Hash, + pubkey::Pubkey, +}; use std::{ collections::BTreeSet, ops::Deref, @@ -42,14 +47,14 @@ impl SlotCacheInner { ); } - pub fn insert(&self, pubkey: &Pubkey, account: Account, hash: Hash) { + pub fn insert(&self, pubkey: &Pubkey, account: AccountSharedData, hash: Hash) { if self.cache.contains_key(pubkey) { self.same_account_writes.fetch_add(1, Ordering::Relaxed); self.same_account_writes_size - .fetch_add(account.data.len() as u64, Ordering::Relaxed); + .fetch_add(account.data().len() as u64, Ordering::Relaxed); } else { self.unique_account_writes_size - .fetch_add(account.data.len() as u64, Ordering::Relaxed); + .fetch_add(account.data().len() as u64, Ordering::Relaxed); } self.cache.insert(*pubkey, CachedAccount { account, hash }); } @@ -58,8 +63,8 @@ impl SlotCacheInner { self.cache .get(pubkey) // 1) Maybe can eventually use a Cow to avoid a clone on every read - // 2) Popping is only safe if its guaranteed only replay/banking threads - // are reading from the AccountsDb + // 2) Popping is only safe if it's guaranteed that only + // replay/banking threads are reading 
from the AccountsDb .map(|account_ref| account_ref.value().clone()) } @@ -86,7 +91,7 @@ impl Deref for SlotCacheInner { #[derive(Debug, Clone)] pub struct CachedAccount { - pub account: Account, + pub account: AccountSharedData, pub hash: Hash, } @@ -123,7 +128,7 @@ impl AccountsCache { ); } - pub fn store(&self, slot: Slot, pubkey: &Pubkey, account: Account, hash: Hash) { + pub fn store(&self, slot: Slot, pubkey: &Pubkey, account: AccountSharedData, hash: Hash) { let slot_cache = self.slot_cache(slot).unwrap_or_else(|| // DashMap entry.or_insert() returns a RefMut, essentially a write lock, // which is dropped after this block ends, minimizing time held by the lock. @@ -169,7 +174,7 @@ impl AccountsCache { // we return all slots <= `max_root` std::mem::replace(&mut w_maybe_unflushed_roots, greater_than_max_root) } else { - std::mem::replace(&mut *w_maybe_unflushed_roots, BTreeSet::new()) + std::mem::take(&mut *w_maybe_unflushed_roots) } } @@ -235,7 +240,7 @@ pub mod tests { cache.store( inserted_slot, &Pubkey::new_unique(), - Account::new(1, 0, &Pubkey::default()), + AccountSharedData::new(1, 0, &Pubkey::default()), Hash::default(), ); // If the cache is told the size limit is 0, it should return the one slot @@ -253,7 +258,7 @@ pub mod tests { cache.store( inserted_slot, &Pubkey::new_unique(), - Account::new(1, 0, &Pubkey::default()), + AccountSharedData::new(1, 0, &Pubkey::default()), Hash::default(), ); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index f69d7cc7c4..7d73007d6a 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -20,9 +20,10 @@ use crate::{ accounts_cache::{AccountsCache, CachedAccount, SlotCache}, + accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats}, accounts_index::{ - AccountIndex, AccountsIndex, AccountsIndexRootsStats, Ancestors, IndexKey, IsCached, - SlotList, SlotSlice, ZeroLamport, + AccountSecondaryIndexes, AccountsIndex, AccountsIndexRootsStats, Ancestors, IndexKey, + 
IsCached, SlotList, SlotSlice, ZeroLamport, }, append_vec::{AppendVec, StoredAccountMeta, StoredMeta}, contains::Contains, @@ -40,7 +41,7 @@ use serde::{Deserialize, Serialize}; use solana_measure::measure::Measure; use solana_rayon_threadlimit::get_thread_count; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, clock::{Epoch, Slot}, genesis_config::ClusterType, hash::{Hash, Hasher}, @@ -51,7 +52,7 @@ use std::{ borrow::Cow, boxed::Box, collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryFrom, io::{Error as IoError, Result as IoResult}, ops::RangeBounds, path::{Path, PathBuf}, @@ -92,8 +93,6 @@ const CACHE_VIRTUAL_WRITE_VERSION: u64 = 0; const CACHE_VIRTUAL_OFFSET: usize = 0; const CACHE_VIRTUAL_STORED_SIZE: usize = 0; -const MERKLE_FANOUT: usize = 16; - type DashMapVersionHash = DashMap; lazy_static! { @@ -167,29 +166,6 @@ type ReclaimResult = (AccountSlots, AppendVecOffsets); type StorageFinder<'a> = Box Arc + 'a>; type ShrinkCandidates = HashMap>>; -const ZERO_RAW_LAMPORTS_SENTINEL: u64 = std::u64::MAX; - -#[derive(Default, Debug, PartialEq, Clone)] -struct CalculateHashIntermediate { - pub version: u64, - pub hash: Hash, - pub lamports: u64, - pub slot: Slot, - pub pubkey: Pubkey, -} - -impl CalculateHashIntermediate { - pub fn new(version: u64, hash: Hash, lamports: u64, slot: Slot, pubkey: Pubkey) -> Self { - Self { - version, - hash, - lamports, - slot, - pubkey, - } - } -} - trait Versioned { fn version(&self) -> u64; } @@ -300,7 +276,7 @@ impl<'a> LoadedAccount<'a> { } } - pub fn account(self) -> Account { + pub fn account(self) -> AccountSharedData { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.clone_account(), LoadedAccount::Cached((_, cached_account)) => match cached_account { @@ -404,8 +380,8 @@ pub struct AccountStorageEntry { impl AccountStorageEntry { pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> 
Self { - let tail = AppendVec::new_relative_path(slot, id); - let path = Path::new(path).join(&tail); + let tail = AppendVec::file_name(slot, id); + let path = Path::new(path).join(tail); let accounts = AppendVec::new(&path, true, file_size as usize); Self { @@ -567,10 +543,6 @@ impl AccountStorageEntry { count } - pub fn get_relative_path(&self) -> Option { - AppendVec::get_relative_path(self.accounts.get_path()) - } - pub fn get_path(&self) -> PathBuf { self.accounts.get_path() } @@ -593,13 +565,15 @@ pub struct BankHashStats { } impl BankHashStats { - pub fn update(&mut self, account: &Account) { + pub fn update(&mut self, account: &AccountSharedData) { if account.lamports == 0 { self.num_removed_accounts += 1; } else { self.num_updated_accounts += 1; } - self.total_data_len = self.total_data_len.wrapping_add(account.data.len() as u64); + self.total_data_len = self + .total_data_len + .wrapping_add(account.data().len() as u64); if account.executable { self.num_executable_accounts += 1; } @@ -736,7 +710,7 @@ pub struct AccountsDb { pub shrink_paths: RwLock>>, /// Directory of paths this accounts_db needs to hold/remove - temp_paths: Option>, + pub(crate) temp_paths: Option>, /// Starting file size of appendvecs file_size: u64, @@ -766,7 +740,7 @@ pub struct AccountsDb { pub cluster_type: Option, - pub account_indexes: HashSet, + pub account_indexes: AccountSecondaryIndexes, pub caching_enabled: bool, @@ -883,6 +857,15 @@ impl PurgeStats { } } +#[derive(Debug)] +struct FlushStats { + slot: Slot, + num_flushed: usize, + num_purged: usize, + total_size: u64, + did_flush: bool, +} + #[derive(Debug, Default)] struct LatestAccountsIndexRootsStats { roots_len: AtomicUsize, @@ -1067,7 +1050,7 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; - let account = Account::new(1, some_data_len, &key); + let account = AccountSharedData::new(1, some_data_len, &key); 
accounts_db.store_uncached(some_slot, &[(&key, &account)]); accounts_db.add_root(0); @@ -1109,7 +1092,7 @@ impl Default for AccountsDb { shrink_stats: ShrinkStats::default(), stats: AccountsStats::default(), cluster_type: None, - account_indexes: HashSet::new(), + account_indexes: AccountSecondaryIndexes::default(), caching_enabled: false, } } @@ -1117,13 +1100,18 @@ impl Default for AccountsDb { impl AccountsDb { pub fn new(paths: Vec, cluster_type: &ClusterType) -> Self { - AccountsDb::new_with_config(paths, cluster_type, HashSet::new(), false) + AccountsDb::new_with_config( + paths, + cluster_type, + AccountSecondaryIndexes::default(), + false, + ) } pub fn new_with_config( paths: Vec, cluster_type: &ClusterType, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, caching_enabled: bool, ) -> Self { let new = if !paths.is_empty() { @@ -1185,6 +1173,11 @@ impl AccountsDb { ) } + pub fn expected_cluster_type(&self) -> ClusterType { + self.cluster_type + .expect("Cluster type must be set at initialization") + } + // Reclaim older states of rooted accounts for AccountsDb bloat mitigation fn clean_old_rooted_accounts( &self, @@ -1209,7 +1202,6 @@ impl AccountsDb { &pubkey, &mut reclaims, max_clean_root, - &self.account_indexes, ); } reclaims @@ -1321,12 +1313,9 @@ impl AccountsDb { let mut dead_keys = Vec::new(); for (pubkey, slots_set) in pubkey_to_slot_set { - let is_empty = self.accounts_index.purge_exact( - &pubkey, - slots_set, - &mut reclaims, - &self.account_indexes, - ); + let is_empty = self + .accounts_index + .purge_exact(&pubkey, slots_set, &mut reclaims); if is_empty { dead_keys.push(pubkey); } @@ -1736,7 +1725,7 @@ impl AccountsDb { I: Iterator>, { struct FoundStoredAccount { - account: Account, + account: AccountSharedData, account_hash: Hash, account_size: usize, store_id: AppendVecId, @@ -1880,7 +1869,7 @@ impl AccountsDb { // `store_accounts_frozen()` above may have purged accounts from some // other storage entries (the ones that 
were just overwritten by this // new storage entry). This means some of those stores might have caused - // this slot to be readded to `self.shrink_candidate_slots`, so delete + // this slot to be read to `self.shrink_candidate_slots`, so delete // those here self.shrink_candidate_slots.lock().unwrap().remove(&slot); @@ -2002,10 +1991,7 @@ impl AccountsDb { } pub fn shrink_candidate_slots(&self) -> usize { - let shrink_slots = std::mem::replace( - &mut *self.shrink_candidate_slots.lock().unwrap(), - HashMap::new(), - ); + let shrink_slots = std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap()); let num_candidates = shrink_slots.len(); for (slot, slot_shrink_candidates) in shrink_slots { let mut measure = Measure::start("shrink_candidate_slots-ms"); @@ -2028,7 +2014,7 @@ impl AccountsDb { pub fn scan_accounts(&self, ancestors: &Ancestors, scan_func: F) -> A where - F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), + F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>), A: Default, { let mut collector = A::default(); @@ -2087,7 +2073,7 @@ impl AccountsDb { scan_func: F, ) -> A where - F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), + F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>), A: Default, R: RangeBounds, { @@ -2117,11 +2103,26 @@ impl AccountsDb { ancestors: &Ancestors, index_key: IndexKey, scan_func: F, - ) -> A + ) -> (A, bool) where - F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), + F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>), A: Default, { + let key = match &index_key { + IndexKey::ProgramId(key) + | IndexKey::SplTokenMint(key) + | IndexKey::SplTokenOwner(key) + | IndexKey::VelasAccountStorage(key) + | IndexKey::VelasAccountOwner(key) + | IndexKey::VelasAccountOperational(key) => key, + }; + + if !self.account_indexes.include_key(key) { + // the requested key was not indexed in the secondary index, so do a normal scan + let used_index = false; + return (self.scan_accounts(ancestors, scan_func), used_index); + } + 
let mut collector = A::default(); self.accounts_index.index_scan_accounts( ancestors, @@ -2139,7 +2140,8 @@ impl AccountsDb { scan_func(&mut collector, account_slot) }, ); - collector + let used_index = true; + (collector, used_index) } /// Scan a specific slot through all the account storage in parallel @@ -2219,7 +2221,11 @@ impl AccountsDb { bank_hashes.insert(slot, new_hash_info); } - pub fn load(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> { + pub fn load( + &self, + ancestors: &Ancestors, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { self.do_load(ancestors, pubkey, None) } @@ -2228,7 +2234,7 @@ impl AccountsDb { ancestors: &Ancestors, pubkey: &Pubkey, max_root: Option, - ) -> Option<(Account, Slot)> { + ) -> Option<(AccountSharedData, Slot)> { let (slot, store_id, offset) = { let (lock, index) = self.accounts_index.get(pubkey, Some(ancestors), max_root)?; let slot_list = lock.slot_list(); @@ -2271,7 +2277,11 @@ impl AccountsDb { .unwrap() } - pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> { + pub fn load_slow( + &self, + ancestors: &Ancestors, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { self.load(ancestors, pubkey) } @@ -2758,22 +2768,14 @@ impl AccountsDb { match scan_result { ScanStorageResult::Cached(cached_keys) => { for pubkey in cached_keys.iter() { - self.accounts_index.purge_exact( - pubkey, - &purge_slot, - &mut reclaims, - &self.account_indexes, - ); + self.accounts_index + .purge_exact(pubkey, &purge_slot, &mut reclaims); } } ScanStorageResult::Stored(stored_keys) => { for set_ref in stored_keys.iter() { - self.accounts_index.purge_exact( - set_ref.key(), - &purge_slot, - &mut reclaims, - &self.account_indexes, - ); + self.accounts_index + .purge_exact(set_ref.key(), &purge_slot, &mut reclaims); } } } @@ -2829,7 +2831,7 @@ impl AccountsDb { pub fn hash_account( slot: Slot, - account: &Account, + account: &AccountSharedData, pubkey: &Pubkey, 
cluster_type: &ClusterType, ) -> Hash { @@ -2842,7 +2844,7 @@ impl AccountsDb { &account.owner, account.executable, account.rent_epoch, - &account.data, + &account.data(), pubkey, include_owner, ) @@ -2853,17 +2855,17 @@ impl AccountsDb { &account.owner, account.executable, account.rent_epoch, - &account.data, + &account.data(), pubkey, include_owner, ) } } - fn hash_frozen_account_data(account: &Account) -> Hash { + fn hash_frozen_account_data(account: &AccountSharedData) -> Hash { let mut hasher = Hasher::default(); - hasher.hash(&account.data); + hasher.hash(&account.data()); hasher.hash(&account.owner.as_ref()); if account.executable { @@ -2973,7 +2975,7 @@ impl AccountsDb { slot: Slot, hashes: &[Hash], mut storage_finder: F, - accounts_and_meta_to_store: &[(StoredMeta, &Account)], + accounts_and_meta_to_store: &[(StoredMeta, &AccountSharedData)], ) -> Vec { assert_eq!(hashes.len(), accounts_and_meta_to_store.len()); let mut infos: Vec = Vec::with_capacity(accounts_and_meta_to_store.len()); @@ -2983,7 +2985,7 @@ impl AccountsDb { let mut storage_find = Measure::start("storage_finder"); let storage = storage_finder( slot, - accounts_and_meta_to_store[infos.len()].1.data.len() + STORE_META_OVERHEAD, + accounts_and_meta_to_store[infos.len()].1.data().len() + STORE_META_OVERHEAD, ); storage_find.stop(); total_storage_find_us += storage_find.as_us(); @@ -2999,7 +3001,7 @@ impl AccountsDb { storage.set_status(AccountStorageStatus::Full); // See if an account overflows the append vecs in the slot. 
- let data_len = (accounts_and_meta_to_store[infos.len()].1.data.len() + let data_len = (accounts_and_meta_to_store[infos.len()].1.data().len() + STORE_META_OVERHEAD) as u64; if !self.has_space_available(slot, data_len) { let special_store_size = std::cmp::max(data_len * 2, self.file_size); @@ -3122,14 +3124,22 @@ impl AccountsDb { let excess_slot_count = old_slots.len(); let mut unflushable_unrooted_slot_count = 0; let max_flushed_root = self.accounts_cache.fetch_max_flush_root(); - for old_slot in old_slots { - // Don't flush slots that are known to be unrooted - if old_slot > max_flushed_root { - self.flush_slot_cache(old_slot, None::<&mut fn(&_, &_) -> bool>); - } else { - unflushable_unrooted_slot_count += 1; - } - } + let old_slot_flush_stats: Vec<_> = old_slots + .into_iter() + .filter_map(|old_slot| { + // Don't flush slots that are known to be unrooted + if old_slot > max_flushed_root { + Some(self.flush_slot_cache(old_slot, None::<&mut fn(&_, &_) -> bool>)) + } else { + unflushable_unrooted_slot_count += 1; + None + } + }) + .collect(); + info!( + "req_flush_root: {:?} old_slot_flushes: {:?}", + requested_flush_root, old_slot_flush_stats + ); datapoint_info!( "accounts_db-flush_accounts_cache", @@ -3163,11 +3173,12 @@ impl AccountsDb { // Remove a random index 0 <= i < `frozen_slots.len()` let rand_slot = frozen_slots.choose(&mut thread_rng()); if let Some(rand_slot) = rand_slot { + let random_flush_stats = + self.flush_slot_cache(*rand_slot, None::<&mut fn(&_, &_) -> bool>); info!( - "Flushing random slot: {}, num_remaining: {}", - *rand_slot, num_slots_remaining + "Flushed random slot: num_remaining: {} {:?}", + num_slots_remaining, random_flush_stats, ); - self.flush_slot_cache(*rand_slot, None::<&mut fn(&_, &_) -> bool>); } } } @@ -3189,7 +3200,7 @@ impl AccountsDb { // If `should_clean` is None, then`should_flush_f` is also None, which will cause // `flush_slot_cache` to flush all accounts to storage without cleaning any accounts. 
let mut should_flush_f = should_clean.map(|(account_bytes_saved, num_accounts_saved)| { - move |&pubkey: &Pubkey, account: &Account| { + move |&pubkey: &Pubkey, account: &AccountSharedData| { use std::collections::hash_map::Entry::{Occupied, Vacant}; let should_flush = match written_accounts.entry(pubkey) { Vacant(vacant_entry) => { @@ -3197,7 +3208,7 @@ impl AccountsDb { true } Occupied(_occupied_entry) => { - *account_bytes_saved += account.data.len(); + *account_bytes_saved += account.data().len(); *num_accounts_saved += 1; // If a later root already wrote this account, no point // in flushing it @@ -3228,7 +3239,7 @@ impl AccountsDb { should_flush_f.as_mut() }; - if self.flush_slot_cache(root, should_flush_f) { + if self.flush_slot_cache(root, should_flush_f).did_flush { num_roots_flushed += 1; } @@ -3250,22 +3261,22 @@ impl AccountsDb { (num_new_roots, num_roots_flushed) } - // `should_flush_f` is an optional closure that determines wehther a given + // `should_flush_f` is an optional closure that determines whether a given // account should be flushed. 
Passing `None` will by default flush all // accounts fn flush_slot_cache( &self, slot: Slot, - mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &Account) -> bool>, - ) -> bool { - info!("flush_slot_cache slot: {}", slot); - let slot_cache = self.accounts_cache.slot_cache(slot); - if let Some(slot_cache) = slot_cache { + mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>, + ) -> FlushStats { + let mut num_purged = 0; + let mut total_size = 0; + let mut num_flushed = 0; + let did_flush = if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) { let iter_items: Vec<_> = slot_cache.iter().collect(); - let mut total_size = 0; let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new(); let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![]; - let (accounts, hashes): (Vec<(&Pubkey, &Account)>, Vec) = iter_items + let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items .iter() .filter_map(|iter_item| { let key = iter_item.key(); @@ -3276,13 +3287,15 @@ impl AccountsDb { .unwrap_or(true); if should_flush { let hash = iter_item.value().hash; - total_size += (account.data.len() + STORE_META_OVERHEAD) as u64; + total_size += (account.data().len() + STORE_META_OVERHEAD) as u64; + num_flushed += 1; Some(((key, account), hash)) } else { // If we don't flush, we have to remove the entry from the // index, since it's equivalent to purging purged_slot_pubkeys.insert((slot, *key)); pubkey_to_slot_set.push((*key, slot)); + num_purged += 1; None } }) @@ -3331,6 +3344,13 @@ impl AccountsDb { true } else { false + }; + FlushStats { + slot, + num_flushed, + num_purged, + total_size, + did_flush, } } @@ -3338,7 +3358,7 @@ impl AccountsDb { &self, slot: Slot, hashes: &[Hash], - accounts_and_meta_to_store: &[(StoredMeta, &Account)], + accounts_and_meta_to_store: &[(StoredMeta, &AccountSharedData)], ) -> Vec { assert_eq!(hashes.len(), accounts_and_meta_to_store.len()); accounts_and_meta_to_store @@ -3363,14 +3383,14 
@@ impl AccountsDb { >( &self, slot: Slot, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], hashes: &[Hash], storage_finder: F, mut write_version_producer: P, is_cached_store: bool, ) -> Vec { - let default_account = Account::default(); - let accounts_and_meta_to_store: Vec<(StoredMeta, &Account)> = accounts + let default_account = AccountSharedData::default(); + let accounts_and_meta_to_store: Vec<(StoredMeta, &AccountSharedData)> = accounts .iter() .map(|(pubkey, account)| { let account = if account.lamports == 0 { @@ -3378,7 +3398,7 @@ impl AccountsDb { } else { *account }; - let data_len = account.data.len() as u64; + let data_len = account.data().len() as u64; let meta = StoredMeta { write_version: write_version_producer.next().unwrap(), pubkey: **pubkey, @@ -3464,126 +3484,13 @@ impl AccountsDb { ); } - pub fn compute_merkle_root_and_capitalization( - hashes: Vec<(Pubkey, Hash, u64)>, - fanout: usize, - ) -> (Hash, u64) { - Self::compute_merkle_root_and_capitalization_loop(hashes, fanout, |t| (t.1, t.2)) - } - - // this function avoids an infinite recursion compiler error - fn compute_merkle_root_and_capitalization_recurse( - hashes: Vec<(Hash, u64)>, - fanout: usize, - ) -> (Hash, u64) { - Self::compute_merkle_root_and_capitalization_loop(hashes, fanout, |t: &(Hash, u64)| { - (t.0, t.1) - }) - } - - // For the first iteration, there could be more items in the tuple than just hash and lamports. - // Using extractor allows us to avoid an unnecessary array copy on the first iteration. 
- fn compute_merkle_root_and_capitalization_loop( - hashes: Vec, - fanout: usize, - extractor: F, - ) -> (Hash, u64) - where - F: Fn(&T) -> (Hash, u64) + std::marker::Sync, - T: std::marker::Sync, - { - if hashes.is_empty() { - return (Hasher::default().result(), 0); - } - - let mut time = Measure::start("time"); - - let total_hashes = hashes.len(); - // we need div_ceil here - let mut chunks = total_hashes / fanout; - if total_hashes % fanout != 0 { - chunks += 1; - } - - let result: Vec<_> = (0..chunks) - .into_par_iter() - .map(|i| { - let start_index = i * fanout; - let end_index = std::cmp::min(start_index + fanout, total_hashes); - - let mut hasher = Hasher::default(); - let mut this_sum = 0u128; - for item in hashes.iter().take(end_index).skip(start_index) { - let (h, l) = extractor(&item); - this_sum += l as u128; - hasher.hash(h.as_ref()); - } - - ( - hasher.result(), - Self::checked_cast_for_capitalization(this_sum), - ) - }) - .collect(); - time.stop(); - debug!("hashing {} {}", total_hashes, time); - - if result.len() == 1 { - result[0] - } else { - Self::compute_merkle_root_and_capitalization_recurse(result, fanout) - } - } - - fn accumulate_account_hashes( - hashes: Vec<(Pubkey, Hash, u64)>, - slot: Slot, - debug: bool, - ) -> Hash { - let ((hash, ..), ..) 
= - Self::accumulate_account_hashes_and_capitalization(hashes, slot, debug); - hash - } - - fn sort_hashes_by_pubkey(hashes: &mut Vec<(Pubkey, Hash, u64)>) { - hashes.par_sort_unstable_by(|a, b| a.0.cmp(&b.0)); - } - - fn accumulate_account_hashes_and_capitalization( - mut hashes: Vec<(Pubkey, Hash, u64)>, - slot: Slot, - debug: bool, - ) -> ((Hash, u64), (Measure, Measure)) { - let mut sort_time = Measure::start("sort"); - Self::sort_hashes_by_pubkey(&mut hashes); - sort_time.stop(); - - if debug { - for (key, hash, _lamports) in &hashes { - info!("slot: {} key {} hash {}", slot, key, hash); - } - } - - let mut hash_time = Measure::start("hash"); - let res = Self::compute_merkle_root_and_capitalization(hashes, MERKLE_FANOUT); - hash_time.stop(); - - (res, (sort_time, hash_time)) - } - - pub fn checked_cast_for_capitalization(balance: u128) -> u64 { - balance - .try_into() - .expect("overflow is detected while summing capitalization") - } - pub fn checked_iterative_sum_for_capitalization(total_cap: u64, new_cap: u64) -> u64 { let new_total = total_cap as u128 + new_cap as u128; - Self::checked_cast_for_capitalization(new_total) + AccountsHash::checked_cast_for_capitalization(new_total) } pub fn checked_sum_for_capitalization>(balances: T) -> u64 { - Self::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) + AccountsHash::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) } // remove this by inlining and remove extra unused params upto all callchain @@ -3612,52 +3519,70 @@ impl AccountsDb { .cloned() .collect(); let mismatch_found = AtomicU64::new(0); - let hashes: Vec<(Hash, u64)> = { + // Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size. + // We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum. 
+ let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4); + let total_lamports = Mutex::::new(0); + let hashes: Vec> = { self.thread_pool_clean.install(|| { - keys.par_iter() - .filter_map(|pubkey| { - if let Some((lock, index)) = - self.accounts_index.get(pubkey, Some(ancestors), Some(slot)) - { - let (slot, account_info) = &lock.slot_list()[index]; - if account_info.lamports != 0 { - self.get_account_accessor_from_cache_or_storage( - *slot, - pubkey, - account_info.store_id, - account_info.offset, - ) - .get_loaded_account() - .and_then(|loaded_account| { - let loaded_hash = loaded_account.loaded_hash(); - let balance = Self::account_balance_for_capitalization( - account_info.lamports, - loaded_account.owner(), - loaded_account.executable(), - ); - - if check_hash { - let computed_hash = loaded_account.compute_hash( + keys.par_chunks(chunks) + .map(|pubkeys| { + let mut sum = 0u128; + let result: Vec = pubkeys + .iter() + .filter_map(|pubkey| { + if let Some((lock, index)) = + self.accounts_index.get(pubkey, Some(ancestors), Some(slot)) + { + let (slot, account_info) = &lock.slot_list()[index]; + if account_info.lamports != 0 { + self.get_account_accessor_from_cache_or_storage( *slot, - &self.cluster_type.expect( - "Cluster type must be set at initialization", - ), pubkey, - ); - if computed_hash != *loaded_hash { - mismatch_found.fetch_add(1, Ordering::Relaxed); - return None; - } + account_info.store_id, + account_info.offset, + ) + .get_loaded_account() + .and_then( + |loaded_account| { + let loaded_hash = loaded_account.loaded_hash(); + let balance = + Self::account_balance_for_capitalization( + account_info.lamports, + loaded_account.owner(), + loaded_account.executable(), + ); + + if check_hash { + let computed_hash = loaded_account + .compute_hash( + *slot, + &self.expected_cluster_type(), + pubkey, + ); + if computed_hash != *loaded_hash { + mismatch_found + .fetch_add(1, Ordering::Relaxed); + return None; + } + } + + sum += balance as u128; + 
Some(*loaded_hash) + }, + ) + } else { + None } - - Some((*loaded_hash, balance)) - }) - } else { - None - } - } else { - None - } + } else { + None + } + }) + .collect(); + let mut total = total_lamports.lock().unwrap(); + *total = + AccountsHash::checked_cast_for_capitalization(*total as u128 + sum); + result }) .collect() }) @@ -3671,10 +3596,10 @@ impl AccountsDb { } scan.stop(); - let hash_total = hashes.len(); + let total_lamports = *total_lamports.lock().unwrap(); + let mut hash_time = Measure::start("hash"); - let (accumulated_hash, total_lamports) = - Self::compute_merkle_root_and_capitalization_recurse(hashes, MERKLE_FANOUT); + let (accumulated_hash, hash_total) = AccountsHash::calculate_hash(hashes); hash_time.stop(); datapoint_info!( "update_accounts_hash", @@ -3702,13 +3627,17 @@ impl AccountsDb { /// Scan through all the account storage in parallel fn scan_account_storage_no_bank( snapshot_storages: &[SnapshotStorage], + stats: &mut crate::accounts_hash::HashStats, scan_func: F, ) -> Vec where F: Fn(LoadedAccount, &mut B, Slot) + Send + Sync, B: Send + Default, { + let mut time = Measure::start("flatten"); let items: Vec<_> = snapshot_storages.iter().flatten().collect(); + time.stop(); + stats.pre_scan_flatten_time_total_us += time.as_us(); // Without chunks, we end up with 1 output vec for each outer snapshot storage. // This results in too many vectors to be efficient. 
@@ -3733,189 +3662,6 @@ impl AccountsDb { .collect() } - fn flatten_hash_intermediate( - data_sections_by_pubkey: Vec>, - ) -> (Vec, Measure, usize) { - let mut flatten_time = Measure::start("flatten"); - let result: Vec<_> = data_sections_by_pubkey.into_iter().flatten().collect(); - let raw_len = result.len(); - flatten_time.stop(); - (result, flatten_time, raw_len) - } - - fn compare_two_hash_entries( - a: &CalculateHashIntermediate, - b: &CalculateHashIntermediate, - ) -> std::cmp::Ordering { - // note partial_cmp only returns None with floating point comparisons - match a.pubkey.partial_cmp(&b.pubkey).unwrap() { - std::cmp::Ordering::Equal => match b.slot.partial_cmp(&a.slot).unwrap() { - std::cmp::Ordering::Equal => b.version.partial_cmp(&a.version).unwrap(), - other => other, - }, - other => other, - } - } - - fn sort_hash_intermediate( - mut data_by_pubkey: Vec, - ) -> (Vec, Measure) { - // sort each PUBKEY_DIVISION vec - let mut sort_time = Measure::start("sort"); - data_by_pubkey.par_sort_unstable_by(Self::compare_two_hash_entries); - sort_time.stop(); - (data_by_pubkey, sort_time) - } - - fn de_dup_and_eliminate_zeros( - sorted_data_by_pubkey: Vec, - ) -> (Vec>, Measure, u64) { - // 1. eliminate zero lamport accounts - // 2. pick the highest slot or (slot = and highest version) of each pubkey - // 3. produce this output: - // vec: PUBKEY_BINS_FOR_CALCULATING_HASHES in pubkey order - // vec: sorted sections from parallelism, in pubkey order - // vec: individual hashes in pubkey order - let mut zeros = Measure::start("eliminate zeros"); - const CHUNKS: usize = 10; - let (hashes, sum) = Self::de_dup_accounts_in_parallel(&sorted_data_by_pubkey, CHUNKS); - zeros.stop(); - (hashes, zeros, sum) - } - - // 1. eliminate zero lamport accounts - // 2. pick the highest slot or (slot = and highest version) of each pubkey - // 3. 
produce this output: - // vec: sorted sections from parallelism, in pubkey order - // vec: individual hashes in pubkey order - fn de_dup_accounts_in_parallel( - pubkey_division: &[CalculateHashIntermediate], - chunk_count: usize, - ) -> (Vec>, u64) { - let len = pubkey_division.len(); - let max = if len > chunk_count { chunk_count } else { 1 }; - let chunk_size = len / max; - let overall_sum = Mutex::new(0u64); - let hashes: Vec> = (0..max) - .into_par_iter() - .map(|chunk_index| { - let mut start_index = chunk_index * chunk_size; - let mut end_index = start_index + chunk_size; - if chunk_index == max - 1 { - end_index = len; - } - - let is_first_slice = chunk_index == 0; - if !is_first_slice { - // note that this causes all regions after region 0 to have 1 item that overlaps with the previous region - start_index -= 1; - } - - let (result, sum) = Self::de_dup_accounts_from_stores( - chunk_index == 0, - &pubkey_division[start_index..end_index], - ); - let mut overall = overall_sum.lock().unwrap(); - *overall = Self::checked_cast_for_capitalization(sum + *overall as u128); - - result - }) - .collect(); - - let sum = *overall_sum.lock().unwrap(); - (hashes, sum) - } - - fn de_dup_accounts_from_stores( - is_first_slice: bool, - slice: &[CalculateHashIntermediate], - ) -> (Vec, u128) { - let len = slice.len(); - let mut result: Vec = Vec::with_capacity(len); - - let mut sum: u128 = 0; - if len > 0 { - let mut i = 0; - // look_for_first_key means the first key we find in our slice may be a - // continuation of accounts belonging to a key that started in the last slice. - // so, look_for_first_key=true means we have to find the first key different than - // the first key we encounter in our slice. Note that if this is true, - // our slice begins one index prior to the 'actual' start of our logical range. 
- let mut look_for_first_key = !is_first_slice; - 'outer: loop { - // at start of loop, item at 'i' is the first entry for a given pubkey - unless look_for_first - let now = &slice[i]; - let last = now.pubkey; - if !look_for_first_key && now.lamports != ZERO_RAW_LAMPORTS_SENTINEL { - // first entry for this key that starts in our slice - result.push(now.hash); - sum += now.lamports as u128; - } - for (k, now) in slice.iter().enumerate().skip(i + 1) { - if now.pubkey != last { - i = k; - look_for_first_key = false; - continue 'outer; - } - } - - break; // ran out of items in our slice, so our slice is done - } - } - (result, sum) - } - - fn flatten_hashes(hashes: Vec>) -> (Vec, Measure, usize) { - // flatten vec/vec into 1d vec of hashes in order - let mut flat2_time = Measure::start("flat2"); - let hashes: Vec = hashes.into_iter().flatten().collect(); - flat2_time.stop(); - let hash_total = hashes.len(); - - (hashes, flat2_time, hash_total) - } - - // input: - // vec: unordered, created by parallelism - // vec: [0..bins] - where bins are pubkey ranges - // vec: [..] 
- items which fin in the containing bin, unordered within this vec - // so, assumption is middle vec is bins sorted by pubkey - fn rest_of_hash_calculation( - accounts: (Vec>, Measure), - ) -> (Hash, u64) { - let (data_sections_by_pubkey, time_scan) = accounts; - - let (outer, flatten_time, raw_len) = - Self::flatten_hash_intermediate(data_sections_by_pubkey); - - let (sorted_data_by_pubkey, sort_time) = Self::sort_hash_intermediate(outer); - - let (hashes, zeros, total_lamports) = - Self::de_dup_and_eliminate_zeros(sorted_data_by_pubkey); - - let (hashes, flat2_time, hash_total) = Self::flatten_hashes(hashes); - - let mut hash_time = Measure::start("hashes"); - let (hash, _) = - Self::compute_merkle_root_and_capitalization_loop(hashes, MERKLE_FANOUT, |t: &Hash| { - (*t, 0) - }); - hash_time.stop(); - datapoint_info!( - "calculate_accounts_hash_without_index", - ("accounts_scan", time_scan.as_us(), i64), - ("eliminate_zeros", zeros.as_us(), i64), - ("hash", hash_time.as_us(), i64), - ("sort", sort_time.as_us(), i64), - ("hash_total", hash_total, i64), - ("flatten", flatten_time.as_us(), i64), - ("flatten_after_zeros", flat2_time.as_us(), i64), - ("unreduced_entries", raw_len as i64, i64), - ); - - (hash, total_lamports) - } - fn calculate_accounts_hash_helper( &self, use_index: bool, @@ -3963,18 +3709,24 @@ impl AccountsDb { fn scan_snapshot_stores( storage: &[SnapshotStorage], - ) -> (Vec>, Measure) { + mut stats: &mut crate::accounts_hash::HashStats, + bins: usize, + ) -> Vec>> { + let max_plus_1 = std::u8::MAX as usize + 1; + assert!(bins <= max_plus_1 && bins > 0); let mut time = Measure::start("scan all accounts"); - let result: Vec> = Self::scan_account_storage_no_bank( + stats.num_snapshot_storage = storage.len(); + let result: Vec>> = Self::scan_account_storage_no_bank( &storage, + &mut stats, |loaded_account: LoadedAccount, - accum: &mut Vec, + accum: &mut Vec>, slot: Slot| { let version = loaded_account.write_version(); let raw_lamports = 
loaded_account.lamports(); let zero_raw_lamports = raw_lamports == 0; let balance = if zero_raw_lamports { - ZERO_RAW_LAMPORTS_SENTINEL + crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL } else { Self::account_balance_for_capitalization( raw_lamports, @@ -3991,12 +3743,17 @@ impl AccountsDb { slot, pubkey, ); - accum.push(source_item); + let rng_index = pubkey.as_ref()[0] as usize * bins / max_plus_1; + let max = accum.len(); + if max == 0 { + accum.extend(vec![Vec::new(); bins]); + } + accum[rng_index].push(source_item); }, ); time.stop(); - - (result, time) + stats.scan_time_total_us += time.as_us(); + result } // modeled after get_accounts_delta_hash @@ -4006,9 +3763,16 @@ impl AccountsDb { thread_pool: Option<&ThreadPool>, ) -> (Hash, u64) { let scan_and_hash = || { - let result = Self::scan_snapshot_stores(storages); + let mut stats = HashStats::default(); + // When calculating hashes, it is helpful to break the pubkeys found into bins based on the pubkey value. + const PUBKEY_BINS_FOR_CALCULATING_HASHES: usize = 64; + let result = Self::scan_snapshot_stores( + storages, + &mut stats, + PUBKEY_BINS_FOR_CALCULATING_HASHES, + ); - Self::rest_of_hash_calculation(result) + AccountsHash::rest_of_hash_calculation(result, &mut stats) }; if let Some(thread_pool) = thread_pool { thread_pool.install(scan_and_hash) @@ -4055,16 +3819,12 @@ impl AccountsDb { pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash { let mut scan = Measure::start("scan"); - let scan_result: ScanStorageResult<(Pubkey, Hash, u64), DashMapVersionHash> = self + let scan_result: ScanStorageResult<(Pubkey, Hash), DashMapVersionHash> = self .scan_account_storage( slot, |loaded_account: LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning - Some(( - *loaded_account.pubkey(), - *loaded_account.loaded_hash(), - CACHE_VIRTUAL_WRITE_VERSION, - )) + Some((*loaded_account.pubkey(), *loaded_account.loaded_hash())) }, |accum: &DashMap, loaded_account: LoadedAccount| 
{ let loaded_write_version = loaded_account.write_version(); @@ -4099,14 +3859,12 @@ impl AccountsDb { ScanStorageResult::Cached(cached_result) => cached_result, ScanStorageResult::Stored(stored_result) => stored_result .into_iter() - .map(|(pubkey, (_latest_write_version, hash))| (pubkey, hash, 0)) + .map(|(pubkey, (_latest_write_version, hash))| (pubkey, hash)) .collect(), }; - let dirty_keys = hashes - .iter() - .map(|(pubkey, _hash, _lamports)| *pubkey) - .collect(); - let ret = Self::accumulate_account_hashes(hashes, slot, false); + let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect(); + + let ret = AccountsHash::accumulate_account_hashes(hashes); accumulate.stop(); let mut uncleaned_time = Measure::start("uncleaned_index"); self.uncleaned_pubkeys.insert(slot, dirty_keys); @@ -4129,7 +3887,7 @@ impl AccountsDb { &self, slot: Slot, infos: Vec, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], ) -> SlotList { let mut reclaims = SlotList::::with_capacity(infos.len() * 2); for (info, pubkey_account) in infos.into_iter().zip(accounts.iter()) { @@ -4138,7 +3896,7 @@ impl AccountsDb { slot, pubkey, &pubkey_account.1.owner, - &pubkey_account.1.data, + &pubkey_account.1.data(), &self.account_indexes, info, &mut reclaims, @@ -4245,12 +4003,16 @@ impl AccountsDb { } let mut accounts_index_root_stats = AccountsIndexRootsStats::default(); - for slot in dead_slots_iter.clone() { - info!("finalize_dead_slot_removal slot {}", slot); - if let Some(latest) = self.accounts_index.clean_dead_slot(*slot) { - accounts_index_root_stats = latest; - } - } + let dead_slots: Vec<_> = dead_slots_iter + .clone() + .map(|slot| { + if let Some(latest) = self.accounts_index.clean_dead_slot(*slot) { + accounts_index_root_stats = latest; + } + *slot + }) + .collect(); + info!("finalize_dead_slot_removal: slots {:?}", dead_slots); self.clean_accounts_stats .latest_accounts_index_roots_stats @@ -4307,7 +4069,7 @@ impl AccountsDb { fn 
hash_accounts( &self, slot: Slot, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], cluster_type: &ClusterType, ) -> Vec { let mut stats = BankHashStats::default(); @@ -4315,7 +4077,7 @@ impl AccountsDb { let hashes: Vec<_> = accounts .iter() .map(|(pubkey, account)| { - total_data += account.data.len(); + total_data += account.data().len(); stats.update(account); Self::hash_account(slot, account, pubkey, cluster_type) }) @@ -4357,7 +4119,7 @@ impl AccountsDb { } /// Cause a panic if frozen accounts would be affected by data in `accounts` - fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &Account)]) { + fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &AccountSharedData)]) { if self.frozen_accounts.is_empty() { return; } @@ -4383,16 +4145,16 @@ impl AccountsDb { } } - pub fn store_cached(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) { + pub fn store_cached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { self.store(slot, accounts, self.caching_enabled); } /// Store the account update. - pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) { + pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { self.store(slot, accounts, false); } - fn store(&self, slot: Slot, accounts: &[(&Pubkey, &Account)], is_cached_store: bool) { + fn store(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)], is_cached_store: bool) { // If all transactions in a batch are errored, // it's possible to get a store with no accounts. 
if accounts.is_empty() { @@ -4400,13 +4162,7 @@ impl AccountsDb { } self.assert_frozen_accounts(accounts); let mut hash_time = Measure::start("hash_accounts"); - let hashes = self.hash_accounts( - slot, - accounts, - &self - .cluster_type - .expect("Cluster type must be set at initialization"), - ); + let hashes = self.hash_accounts(slot, accounts, &self.expected_cluster_type()); hash_time.stop(); self.stats .store_hash_accounts @@ -4516,7 +4272,7 @@ impl AccountsDb { fn store_accounts_unfrozen( &self, slot: Slot, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], hashes: &[Hash], is_cached_store: bool, ) { @@ -4542,7 +4298,7 @@ impl AccountsDb { fn store_accounts_frozen<'a>( &'a self, slot: Slot, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], hashes: &[Hash], storage_finder: Option>, write_version_producer: Option>>, @@ -4566,7 +4322,7 @@ impl AccountsDb { fn store_accounts_custom<'a>( &'a self, slot: Slot, - accounts: &[(&Pubkey, &Account)], + accounts: &[(&Pubkey, &AccountSharedData)], hashes: &[Hash], storage_finder: Option>, write_version_producer: Option>>, @@ -4882,7 +4638,7 @@ impl AccountsDb { // store ref count could become incorrect. 
fn do_shrink_slot_v1(&self, slot: Slot, forced: bool) -> usize { struct FoundStoredAccount { - account: Account, + account: AccountSharedData, account_hash: Hash, account_size: usize, store_id: AppendVecId, @@ -5234,15 +4990,21 @@ impl AccountsDb { #[cfg(test)] pub mod tests { - // TODO: all the bank tests are bank specific, issue: 2194 use super::*; use crate::{ - accounts_index::tests::*, accounts_index::RefCount, append_vec::AccountMeta, + accounts_hash::MERKLE_FANOUT, + accounts_index::RefCount, + accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, + append_vec::AccountMeta, inline_spl_token_v2_0, }; use assert_matches::assert_matches; use rand::{thread_rng, Rng}; - use solana_sdk::{account::Account, hash::HASH_BYTES, pubkey::PUBKEY_BYTES}; + use solana_sdk::{ + account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, + hash::HASH_BYTES, + pubkey::PUBKEY_BYTES, + }; use std::{ iter::FromIterator, str::FromStr, @@ -5259,309 +5021,150 @@ pub mod tests { } #[test] - fn test_accountsdb_rest_of_hash_calculation() { - solana_logger::setup(); - - let mut account_maps: Vec = Vec::new(); - - let key = Pubkey::new(&[11u8; 32]); - let hash = Hash::new(&[1u8; 32]); - let val = CalculateHashIntermediate::new(0, hash, 88, Slot::default(), key); - account_maps.push(val); - - // 2nd key - zero lamports, so will be removed - let key = Pubkey::new(&[12u8; 32]); - let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new( - 0, - hash, - ZERO_RAW_LAMPORTS_SENTINEL, - Slot::default(), - key, - ); - account_maps.push(val); - - let result = - AccountsDb::rest_of_hash_calculation((vec![account_maps.clone()], Measure::start(""))); - let expected_hash = Hash::from_str("8j9ARGFv4W2GfML7d3sVJK2MePwrikqYnu6yqer28cCa").unwrap(); - assert_eq!(result, (expected_hash, 88)); - - // 3rd key - with pubkey value before 1st key so it will be sorted first - let key = Pubkey::new(&[10u8; 32]); - let hash = Hash::new(&[2u8; 32]); - let val = 
CalculateHashIntermediate::new(0, hash, 20, Slot::default(), key); - account_maps.push(val); - - let result = - AccountsDb::rest_of_hash_calculation((vec![account_maps.clone()], Measure::start(""))); - let expected_hash = Hash::from_str("EHv9C5vX7xQjjMpsJMzudnDTzoTSRwYkqLzY8tVMihGj").unwrap(); - assert_eq!(result, (expected_hash, 108)); - - // 3rd key - with later slot - let key = Pubkey::new(&[10u8; 32]); - let hash = Hash::new(&[99u8; 32]); - let val = CalculateHashIntermediate::new(0, hash, 30, Slot::default() + 1, key); - account_maps.push(val); - - let result = AccountsDb::rest_of_hash_calculation((vec![account_maps], Measure::start(""))); - let expected_hash = Hash::from_str("7NNPg5A8Xsg1uv4UFm6KZNwsipyyUnmgCrznP6MBWoBZ").unwrap(); - assert_eq!(result, (expected_hash, 118)); + #[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")] + fn test_accountsdb_scan_snapshot_stores_illegal_bins2() { + let mut stats = HashStats::default(); + AccountsDb::scan_snapshot_stores(&[], &mut stats, 257); } - #[test] - fn test_accountsdb_de_dup_accounts_from_stores() { - solana_logger::setup(); - - let key_a = Pubkey::new(&[1u8; 32]); - let key_b = Pubkey::new(&[2u8; 32]); - let key_c = Pubkey::new(&[3u8; 32]); - const COUNT: usize = 6; - const VERSION: u64 = 0; - let hashes: Vec<_> = (0..COUNT) - .into_iter() - .map(|i| Hash::new(&[i as u8; 32])) - .collect(); - // create this vector - // abbbcc - let keys = [key_a, key_b, key_b, key_b, key_c, key_c]; - - let accounts: Vec<_> = hashes - .into_iter() - .zip(keys.iter()) - .enumerate() - .map(|(i, (hash, key))| { - CalculateHashIntermediate::new(VERSION, hash, (i + 1) as u64, Slot::default(), *key) - }) - .collect(); - - type ExpectedType = (String, bool, u64, String); - let expected:Vec = vec![ - // ("key/lamports key2/lamports ...", - // is_first_slice - // result lamports - // result hashes) - // "a5" = key_a, 5 lamports - ("a1", false, 0, "[]"), - ("a1b2", false, 2, 
"[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3", false, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3b4", false, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3b4c5", false, 7, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b2", false, 0, "[]"), - ("b2b3", false, 0, "[]"), - ("b2b3b4", false, 0, "[]"), - ("b2b3b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b3", false, 0, "[]"), - ("b3b4", false, 0, "[]"), - ("b3b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b4", false, 0, "[]"), - ("b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("c5", false, 0, "[]"), - ("a1", true, 1, "[11111111111111111111111111111111]"), - ("a1b2", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3b4", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("a1b2b3b4c5", true, 8, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b2", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("b2b3", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("b2b3b4", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), - ("b2b3b4c5", true, 7, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b3", true, 3, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR]"), - ("b3b4", true, 3, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR]"), - ("b3b4c5", true, 8, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("b4", true, 4, "[CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8]"), - ("b4c5", true, 9, "[CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8, 
GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ("c5", true, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), - ].into_iter().map(|item| { - let result: ExpectedType = ( - item.0.to_string(), - item.1, - item.2, - item.3.to_string(), - ); - result - }).collect(); - - let mut expected_index = 0; - for first_slice in 0..2 { - for start in 0..COUNT { - for end in start + 1..COUNT { - let is_first_slice = first_slice == 1; - let accounts = accounts.clone(); - let slice = &accounts[start..end]; - - let result = AccountsDb::de_dup_accounts_from_stores(is_first_slice, slice); - let (hashes2, lamports2) = AccountsDb::de_dup_accounts_in_parallel(slice, 1); - let (hashes3, lamports3) = AccountsDb::de_dup_accounts_in_parallel(slice, 2); - let (hashes4, _, lamports4) = - AccountsDb::de_dup_and_eliminate_zeros(slice.to_vec()); - - assert_eq!( - hashes2.iter().flatten().collect::>(), - hashes3.iter().flatten().collect::>() - ); - assert_eq!( - hashes2.iter().flatten().collect::>(), - hashes4.iter().flatten().collect::>() - ); - assert_eq!(lamports2, lamports3); - assert_eq!(lamports2, lamports4); - let hashes: Vec<_> = hashes2.into_iter().flatten().collect(); - - let human_readable = slice - .iter() - .map(|v| { - let mut s = (if v.pubkey == key_a { - "a" - } else if v.pubkey == key_b { - "b" - } else { - "c" - }) - .to_string(); - - s.push_str(&v.lamports.to_string()); - s - }) - .collect::(); - - let hash_result_as_string = format!("{:?}", result.0); - - let packaged_result: ExpectedType = ( - human_readable, - is_first_slice, - result.1 as u64, - hash_result_as_string, - ); - - if is_first_slice { - // the parallel version always starts with 'first slice' - assert_eq!( - result.0, hashes, - "description: {:?}, expected index: {}", - packaged_result, expected_index - ); - assert_eq!( - result.1 as u64, lamports2, - "description: {:?}, expected index: {}", - packaged_result, expected_index - ); - } - - assert_eq!(expected[expected_index], packaged_result); - - // for 
generating expected results - // error!("{:?},", packaged_result); - expected_index += 1; - } - } - } - - for first_slice in 0..2 { - let result = AccountsDb::de_dup_accounts_from_stores(first_slice == 1, &[]); - assert_eq!((vec![Hash::default(); 0], 0), result); - } - } - - #[test] - fn test_accountsdb_flatten_hashes() { - solana_logger::setup(); - const COUNT: usize = 4; - let hashes: Vec<_> = (0..COUNT) - .into_iter() - .map(|i| Hash::new(&[(i) as u8; 32])) - .collect(); - let expected = hashes.clone(); - - assert_eq!(AccountsDb::flatten_hashes(vec![hashes.clone()]).0, expected); - for in_first in 1..COUNT - 1 { - assert_eq!( - AccountsDb::flatten_hashes(vec![ - hashes.clone()[0..in_first].to_vec(), - hashes.clone()[in_first..COUNT].to_vec() - ]) - .0, - expected - ); - } + #[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")] + fn test_accountsdb_scan_snapshot_stores_illegal_bins() { + let mut stats = HashStats::default(); + AccountsDb::scan_snapshot_stores(&[], &mut stats, 0); } - #[test] - fn test_accountsdb_compare_two_hash_entries() { - solana_logger::setup(); - let key = Pubkey::new_unique(); - let hash = Hash::new_unique(); - let val = CalculateHashIntermediate::new(1, hash, 1, 1, key); + fn sample_storages_and_accounts() -> (SnapshotStorages, Vec) { + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); + let pubkey0 = Pubkey::new(&[0u8; 32]); + let pubkey127 = Pubkey::new(&[0x7fu8; 32]); + let pubkey128 = Pubkey::new(&[0x80u8; 32]); + let pubkey255 = Pubkey::new(&[0xffu8; 32]); + + const SLOT: u64 = 0; + + let raw_expected = vec![ + CalculateHashIntermediate { + version: 0, + hash: Hash::from_str("2UXkyxNEXNRbLo793fkWcdqQDuU8zwFjVhH6sbrcptKH").unwrap(), + lamports: 1, + slot: 0, + pubkey: pubkey0, + }, + CalculateHashIntermediate { + version: 1, + hash: Hash::from_str("E8cioj2q9T6QFhijrUPRnP8iy86NtQievPyRe3GY5TMC").unwrap(), + lamports: 128, + slot: 0, + pubkey: pubkey127, + }, + CalculateHashIntermediate { + 
version: 2, + hash: Hash::from_str("9yaXmx2ruksV1465BuMffqspjW35ggH8nTs8SW2Lq6NK").unwrap(), + lamports: 129, + slot: 0, + pubkey: pubkey128, + }, + CalculateHashIntermediate { + version: 3, + hash: Hash::from_str("7nhnUMjRsaA83HgvEJVv3YrDqHd1SCoVbvsWDTXzCgfh").unwrap(), + lamports: 256, + slot: 0, + pubkey: pubkey255, + }, + ]; - // slot same, version < - let hash2 = Hash::new_unique(); - let val2 = CalculateHashIntermediate::new(0, hash2, 4, 1, key); - assert_eq!( - std::cmp::Ordering::Less, - AccountsDb::compare_two_hash_entries(&val, &val2) + accounts.store_uncached( + SLOT, + &[( + &pubkey0, + &AccountSharedData::new( + raw_expected[0].lamports, + 1, + &AccountSharedData::default().owner, + ), + )], ); - - let list = vec![val.clone(), val2.clone()]; - let mut list_bkup = list.clone(); - list_bkup.sort_by(AccountsDb::compare_two_hash_entries); - let (list, _) = AccountsDb::sort_hash_intermediate(list); - assert_eq!(list, list_bkup); - - let list = vec![val2, val.clone()]; // reverse args - let mut list_bkup = list.clone(); - list_bkup.sort_by(AccountsDb::compare_two_hash_entries); - let (list, _) = AccountsDb::sort_hash_intermediate(list); - assert_eq!(list, list_bkup); - - // slot same, vers = - let hash3 = Hash::new_unique(); - let val3 = CalculateHashIntermediate::new(1, hash3, 2, 1, key); - assert_eq!( - std::cmp::Ordering::Equal, - AccountsDb::compare_two_hash_entries(&val, &val3) + accounts.store_uncached( + SLOT, + &[( + &pubkey127, + &AccountSharedData::new(128, 1, &AccountSharedData::default().owner), + )], ); - - // slot same, vers > - let hash4 = Hash::new_unique(); - let val4 = CalculateHashIntermediate::new(2, hash4, 6, 1, key); - assert_eq!( - std::cmp::Ordering::Greater, - AccountsDb::compare_two_hash_entries(&val, &val4) + accounts.store_uncached( + SLOT, + &[( + &pubkey128, + &AccountSharedData::new(129, 1, &AccountSharedData::default().owner), + )], ); - - // slot >, version < - let hash5 = Hash::new_unique(); - let val5 = 
CalculateHashIntermediate::new(0, hash5, 8, 2, key); - assert_eq!( - std::cmp::Ordering::Greater, - AccountsDb::compare_two_hash_entries(&val, &val5) + accounts.store_uncached( + SLOT, + &[( + &pubkey255, + &AccountSharedData::new(256, 1, &AccountSharedData::default().owner), + )], ); + accounts.add_root(SLOT); + + let storages = accounts.get_snapshot_storages(SLOT); + (storages, raw_expected) } #[test] - fn test_accountsdb_remove_zero_balance_accounts() { - solana_logger::setup(); - - let key = Pubkey::new_unique(); - let hash = Hash::new_unique(); - let mut account_maps: Vec = Vec::new(); - let val = CalculateHashIntermediate::new(0, hash, 1, Slot::default(), key); - account_maps.push(val.clone()); + fn test_accountsdb_scan_snapshot_stores() { + let (mut storages, raw_expected) = sample_storages_and_accounts(); + + let bins = 1; + let mut stats = HashStats::default(); + let result = AccountsDb::scan_snapshot_stores(&storages, &mut stats, bins); + assert_eq!(result, vec![vec![raw_expected.clone()]]); + + let bins = 2; + let result = AccountsDb::scan_snapshot_stores(&storages, &mut stats, bins); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0].clone()); + expected[0].push(raw_expected[1].clone()); + expected[bins - 1].push(raw_expected[2].clone()); + expected[bins - 1].push(raw_expected[3].clone()); + assert_eq!(result, vec![expected]); + + let bins = 4; + let result = AccountsDb::scan_snapshot_stores(&storages, &mut stats, bins); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0].clone()); + expected[1].push(raw_expected[1].clone()); + expected[2].push(raw_expected[2].clone()); + expected[bins - 1].push(raw_expected[3].clone()); + assert_eq!(result, vec![expected]); + + let bins = 256; + let result = AccountsDb::scan_snapshot_stores(&storages, &mut stats, bins); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0].clone()); + expected[127].push(raw_expected[1].clone()); + 
expected[128].push(raw_expected[2].clone()); + expected[bins - 1].push(raw_expected.last().unwrap().clone()); + assert_eq!(result, vec![expected]); + + // enough stores to get to 2nd chunk + let bins = 1; + let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); + let slot_expected: Slot = 0; + let size: usize = 123; + let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64); - let result = AccountsDb::de_dup_accounts_from_stores(true, &account_maps[..]); - assert_eq!(result, (vec![val.hash], val.lamports as u128)); + let arc = Arc::new(data); - // zero original lamports, higher version - let val = CalculateHashIntermediate::new( - 1, - hash, - ZERO_RAW_LAMPORTS_SENTINEL, - Slot::default(), - key, - ); - account_maps.insert(0, val); // has to be before other entry since sort order matters + const MAX_ITEMS_PER_CHUNK: usize = 5_000; + storages[0].splice(0..0, vec![arc; MAX_ITEMS_PER_CHUNK]); - let result = AccountsDb::de_dup_accounts_from_stores(true, &account_maps[..]); - assert_eq!(result, (vec![], 0)); + let mut stats = HashStats::default(); + let result = AccountsDb::scan_snapshot_stores(&storages, &mut stats, bins); + assert_eq!(result.len(), 2); // 2 chunks + assert_eq!(result[0].len(), 0); // nothing found in first slots + assert_eq!(result[1].len(), bins); + assert_eq!(result[1], vec![raw_expected]); } #[test] @@ -5574,6 +5177,21 @@ pub mod tests { assert_eq!(result, (expected_hash, 0)); } + #[test] + fn test_accountsdb_calculate_accounts_hash_without_index() { + solana_logger::setup(); + + let (storages, raw_expected) = sample_storages_and_accounts(); + let expected_hash = + AccountsHash::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| { + item.hash + }); + let sum = raw_expected.iter().map(|item| item.lamports).sum(); + let result = AccountsDb::calculate_accounts_hash_without_index(&storages, None); + + assert_eq!(result, (expected_hash, sum)); + } + fn sample_storage() -> (SnapshotStorages, usize, Slot) { 
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); let slot_expected: Slot = 0; @@ -5603,7 +5221,7 @@ pub mod tests { let arc = Arc::new(data); let storages = vec![vec![arc]]; let pubkey = solana_sdk::pubkey::new_rand(); - let acc = Account::new(1, 48, &Account::default().owner); + let acc = AccountSharedData::new(1, 48, &AccountSharedData::default().owner); let sm = StoredMeta { data_len: 1, pubkey, @@ -5616,6 +5234,7 @@ pub mod tests { let calls = AtomicU64::new(0); let result = AccountsDb::scan_account_storage_no_bank( &storages, + &mut HashStats::default(), |loaded_account: LoadedAccount, accum: &mut Vec, slot: Slot| { calls.fetch_add(1, Ordering::Relaxed); assert_eq!(loaded_account.pubkey(), &pubkey); @@ -5627,157 +5246,12 @@ pub mod tests { assert_eq!(result, vec![vec![expected]]); } - #[test] - fn test_accountsdb_flatten_hash_intermediate() { - solana_logger::setup(); - let test = vec![vec![CalculateHashIntermediate::new( - 1, - Hash::new_unique(), - 2, - 3, - Pubkey::new_unique(), - )]]; - let (result, _, len) = AccountsDb::flatten_hash_intermediate(test.clone()); - assert_eq!(result, test[0]); - assert_eq!(len, 1); - - let (result, _, len) = AccountsDb::flatten_hash_intermediate(vec![ - vec![CalculateHashIntermediate::default(); 0], - ]); - assert_eq!(result.len(), 0); - assert_eq!(len, 0); - - let test = vec![ - vec![ - CalculateHashIntermediate::new(1, Hash::new_unique(), 2, 3, Pubkey::new_unique()), - CalculateHashIntermediate::new(8, Hash::new_unique(), 9, 10, Pubkey::new_unique()), - ], - vec![CalculateHashIntermediate::new( - 4, - Hash::new_unique(), - 5, - 6, - Pubkey::new_unique(), - )], - ]; - let (result, _, len) = AccountsDb::flatten_hash_intermediate(test.clone()); - let expected = test.into_iter().flatten().collect::>(); - assert_eq!(result, expected); - assert_eq!(len, expected.len()); - } - - #[test] - fn test_accountsdb_compute_merkle_root_and_capitalization() { - solana_logger::setup(); - - let expected_results = vec![ - (0, 0, 
"GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn", 0), - (0, 1, "8unXKJYTxrR423HgQxbDmx29mFri1QNrzVKKDxEfc6bj", 0), - (0, 2, "6QfkevXLLqbfAaR1kVjvMLFtEXvNUVrpmkwXqgsYtCFW", 1), - (0, 3, "G3FrJd9JrXcMiqChTSfvEdBL2sCPny3ebiUy9Xxbn7a2", 3), - (0, 4, "G3sZXHhwoCFuNyWy7Efffr47RBW33ibEp7b2hqNDmXdu", 6), - (0, 5, "78atJJYpokAPKMJwHxUW8SBDvPkkSpTBV7GiB27HwosJ", 10), - (0, 6, "7c9SM2BmCRVVXdrEdKcMK91MviPqXqQMd8QAb77tgLEy", 15), - (0, 7, "3hsmnZPhf22UvBLiZ4dVa21Qsdh65CCrtYXsb8MxoVAa", 21), - (0, 8, "5bwXUiC6RCRhb8fqvjvUXT6waU25str3UXA3a6Aq1jux", 28), - (0, 9, "3NNtQKH6PaYpCnFBtyi2icK9eYX3YM5pqA3SKaXtUNzu", 36), - (1, 0, "GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn", 0), - (1, 1, "4GWVCsnEu1iRyxjAB3F7J7C4MMvcoxFWtP9ihvwvDgxY", 0), - (1, 2, "8ML8Te6Uw2mipFr2v9sMZDcziXzhVqJo2qeMJohg1CJx", 1), - (1, 3, "AMEuC3AgqAeRBGBhSfTmuMdfbAiXJnGmKv99kHmcAE1H", 3), - (1, 4, "HEnDuJLHpsQfrApimGrovTqPEF6Vkrx2dKFr3BDtYzWx", 6), - (1, 5, "6rH69iP2yM1o565noZN1EqjySW4PhYUskz3c5tXePUfV", 10), - (1, 6, "7qEQMEXdfSPjbZ3q4cuuZwebDMvTvuaQ3dBiHoDUKo9a", 15), - (1, 7, "GDJz7LSKYjqqz6ujCaaQRJRmQ7TLNCwYJhdT84qT4qwk", 21), - (1, 8, "HT9krPLVTo3rr5WZQBQFrbqWs8SbYScXfnt8EVuobboM", 28), - (1, 9, "8y2pMgqMdRsvqw6BQXm6wtz3qxGPss72i6H6gVpPyeda", 36), - ]; - - let mut expected_index = 0; - let start = 0; - let default_fanout = 2; - // test 0..3 recursions (at fanout = 2) and 1 item remainder. The internals have 1 special case first loop and subsequent loops are the same types. 
- let iterations = default_fanout * default_fanout * default_fanout + 2; - for pass in 0..2 { - let fanout = if pass == 0 { - default_fanout - } else { - MERKLE_FANOUT - }; - for count in start..iterations { - let mut input: Vec<_> = (0..count) - .map(|i| { - let key = Pubkey::new(&[(pass * iterations + count) as u8; 32]); - let hash = Hash::new(&[(pass * iterations + count + i + 1) as u8; 32]); - (key, hash, i as u64) - }) - .collect(); - let result; - if pass == 0 { - result = - AccountsDb::compute_merkle_root_and_capitalization(input.clone(), fanout); - } else { - result = AccountsDb::accumulate_account_hashes_and_capitalization( - input.clone(), - Slot::default(), - false, - ) - .0; - assert_eq!( - AccountsDb::accumulate_account_hashes( - input.clone(), - Slot::default(), - false - ), - result.0 - ); - AccountsDb::sort_hashes_by_pubkey(&mut input); - } - let mut expected = 0; - if count > 0 { - let count = count as u64; - let last_number = count - 1; - expected = count * last_number / 2; - } - - // compare against calculated result for lamports - assert_eq!( - result.1, - expected, - "failed at size: {}, with inputs: {:?}", - count, - input.into_iter().map(|x| x.2).collect::>() - ); - - // compare against captured, expected results for hash (and lamports) - assert_eq!( - (pass, count, &*(result.0.to_string()), result.1), - expected_results[expected_index] - ); - expected_index += 1; - } - } - } - - #[test] - #[should_panic(expected = "overflow is detected while summing capitalization")] - fn test_accountsdb_compute_merkle_root_and_capitalization_overflow() { - solana_logger::setup(); - - let fanout = 2; - let input = vec![ - (Pubkey::new_unique(), Hash::new_unique(), u64::MAX), - (Pubkey::new_unique(), Hash::new_unique(), 1), - ]; - AccountsDb::compute_merkle_root_and_capitalization(input, fanout); - } - #[test] fn test_accountsdb_add_root() { solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - 
let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(0, &[(&key, &account0)]); db.add_root(0); @@ -5790,11 +5264,11 @@ pub mod tests { solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(0, &[(&key, &account0)]); - let account1 = Account::new(0, 0, &key); + let account1 = AccountSharedData::new(0, 0, &key); db.store_uncached(1, &[(&key, &account1)]); let ancestors = vec![(1, 1)].into_iter().collect(); @@ -5803,10 +5277,13 @@ pub mod tests { let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1); - let accounts: Vec = - db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec, option| { + let accounts: Vec = db.unchecked_scan_accounts( + "", + &ancestors, + |accounts: &mut Vec, option| { accounts.push(option.1.account()); - }); + }, + ); assert_eq!(accounts, vec![account1]); } @@ -5815,11 +5292,11 @@ pub mod tests { solana_logger::setup(); let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(0, &[(&key, &account0)]); - let account1 = Account::new(0, 0, &key); + let account1 = AccountSharedData::new(0, 0, &key); db.store_uncached(1, &[(&key, &account1)]); db.add_root(0); @@ -5836,7 +5313,7 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); // store value 1 in the "root", i.e. 
db zero db.store_uncached(0, &[(&key, &account0)]); @@ -5851,7 +5328,7 @@ pub mod tests { // (via root0) // store value 0 in one child - let account1 = Account::new(0, 0, &key); + let account1 = AccountSharedData::new(0, 0, &key); db.store_uncached(1, &[(&key, &account1)]); // masking accounts is done at the Accounts level, at accountsDB we see @@ -5882,10 +5359,10 @@ pub mod tests { let idx = thread_rng().gen_range(0, 99); let ancestors = vec![(0, 0)].into_iter().collect(); let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap(); - let default_account = Account { + let default_account = AccountSharedData::from(Account { lamports: (idx + 1) as u64, ..Account::default() - }; + }); assert_eq!((default_account, 0), account); } @@ -5898,10 +5375,10 @@ pub mod tests { let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap(); let ancestors = vec![(1, 1)].into_iter().collect(); let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap(); - let default_account = Account { + let default_account = AccountSharedData::from(Account { lamports: (idx + 1) as u64, ..Account::default() - }; + }); assert_eq!(&default_account, &account0.0); assert_eq!(&default_account, &account1.0); } @@ -5917,7 +5394,7 @@ pub mod tests { assert!(check_storage(&db, 0, 2)); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey); + let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey); db.store_uncached(1, &[(&pubkey, &account)]); db.store_uncached(1, &[(&pubkeys[0], &account)]); { @@ -5973,11 +5450,11 @@ pub mod tests { // 1 token in the "root", i.e. 
db zero let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db0.store_uncached(0, &[(&key, &account0)]); // 0 lamports in the child - let account1 = Account::new(0, 0, &key); + let account1 = AccountSharedData::new(0, 0, &key); db0.store_uncached(1, &[(&key, &account1)]); // masking accounts is done at the Accounts level, at accountsDB we see @@ -5994,7 +5471,7 @@ pub mod tests { let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); let ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect(); db.store_cached(unrooted_slot, &[(&key, &account0)]); db.bank_hashes @@ -6023,7 +5500,7 @@ pub mod tests { .is_none()); // Test we can store for the same slot again and get the right information - let account0 = Account::new(2, 0, &key); + let account0 = AccountSharedData::new(2, 0, &key); db.store_uncached(unrooted_slot, &[(&key, &account0)]); assert_load_account(&db, unrooted_slot, key, 2); } @@ -6034,7 +5511,7 @@ pub mod tests { let unrooted_slot = 9; let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(unrooted_slot, &[(&key, &account0)]); // Purge the slot @@ -6068,14 +5545,16 @@ pub mod tests { let ancestors = vec![(slot, 0)].into_iter().collect(); for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((t + 1) as u64, space, &Account::default().owner); + let account = + AccountSharedData::new((t + 1) as u64, space, &AccountSharedData::default().owner); pubkeys.push(pubkey); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); accounts.store_uncached(slot, &[(&pubkey, 
&account)]); } for t in 0..num_vote { let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id()); + let account = + AccountSharedData::new((num + t + 1) as u64, space, &solana_vote_program::id()); pubkeys.push(pubkey); let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); @@ -6094,10 +5573,10 @@ pub mod tests { let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none()); } else { - let default_account = Account { + let default_account = AccountSharedData::from(Account { lamports: account.lamports, ..Account::default() - }; + }); assert_eq!(default_account, account); } } @@ -6149,7 +5628,11 @@ pub mod tests { let idx = thread_rng().gen_range(0, num); let account = accounts.load_slow(&ancestors, &pubkeys[idx]); let account1 = Some(( - Account::new((idx + count) as u64, 0, &Account::default().owner), + AccountSharedData::new( + (idx + count) as u64, + 0, + &AccountSharedData::default().owner, + ), slot, )); assert_eq!(account, account1); @@ -6165,7 +5648,11 @@ pub mod tests { count: usize, ) { for idx in 0..num { - let account = Account::new((idx + count) as u64, 0, &Account::default().owner); + let account = AccountSharedData::new( + (idx + count) as u64, + 0, + &AccountSharedData::default().owner, + ); accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]); } } @@ -6178,10 +5665,10 @@ pub mod tests { create_account(&db, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); let account = db.load_slow(&ancestors, &pubkeys[0]).unwrap(); - let default_account = Account { + let default_account = AccountSharedData::from(Account { lamports: 1, ..Account::default() - }; + }); assert_eq!((default_account, 0), account); } @@ -6211,7 +5698,7 @@ pub mod tests { let mut keys = vec![]; for i in 0..9 { let key = solana_sdk::pubkey::new_rand(); - let account = 
Account::new(i + 1, size as usize / 4, &key); + let account = AccountSharedData::new(i + 1, size as usize / 4, &key); accounts.store_uncached(0, &[(&key, &account)]); keys.push(key); } @@ -6242,7 +5729,7 @@ pub mod tests { let status = [AccountStorageStatus::Available, AccountStorageStatus::Full]; let pubkey1 = solana_sdk::pubkey::new_rand(); - let account1 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1); + let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1); accounts.store_uncached(0, &[(&pubkey1, &account1)]); { let stores = &accounts.storage.get_slot_stores(0).unwrap(); @@ -6253,7 +5740,7 @@ pub mod tests { } let pubkey2 = solana_sdk::pubkey::new_rand(); - let account2 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2); + let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2); accounts.store_uncached(0, &[(&pubkey2, &account2)]); { assert_eq!(accounts.storage.0.len(), 1); @@ -6305,7 +5792,7 @@ pub mod tests { //not root, it means we are retaining dead banks. 
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); //store an account accounts.store_uncached(0, &[(&pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -6377,8 +5864,9 @@ pub mod tests { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 1, &Account::default().owner); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 1, &AccountSharedData::default().owner); + let zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); // Store two accounts accounts.store_uncached(0, &[(&pubkey1, &account)]); @@ -6432,8 +5920,9 @@ pub mod tests { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); + let zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); // Store a zero-lamport account accounts.store_uncached(0, &[(&pubkey, &account)]); @@ -6471,7 +5960,7 @@ pub mod tests { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); //store an account accounts.store_uncached(0, &[(&pubkey, &account)]); accounts.store_uncached(1, &[(&pubkey, &account)]); @@ -6500,8 +5989,8 @@ pub mod tests { 
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); - let normal_account = Account::new(1, 0, &Account::default().owner); - let zero_account = Account::new(0, 0, &Account::default().owner); + let normal_account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); + let zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); //store an account accounts.store_uncached(0, &[(&pubkey1, &normal_account)]); accounts.store_uncached(1, &[(&pubkey1, &zero_account)]); @@ -6531,7 +6020,7 @@ pub mod tests { fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); - let accounts = AccountsDb::new_with_config( + let mut accounts = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, spl_token_mint_index_enabled(), @@ -6544,14 +6033,14 @@ pub mod tests { let mint_key = Pubkey::new_unique(); let mut account_data_with_mint = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; - account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.clone().to_bytes())); + account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.to_bytes())); - let mut normal_account = Account::new(1, 0, &Account::default().owner); + let mut normal_account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); normal_account.owner = inline_spl_token_v2_0::id(); - normal_account.data = account_data_with_mint.clone(); - let mut zero_account = Account::new(0, 0, &Account::default().owner); + normal_account.set_data(account_data_with_mint.clone()); + let mut zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); zero_account.owner = inline_spl_token_v2_0::id(); - zero_account.data = account_data_with_mint; + zero_account.set_data(account_data_with_mint); //store an account accounts.store_uncached(0, &[(&pubkey1, &normal_account)]); @@ -6575,17 
+6064,52 @@ pub mod tests { // Secondary index should still find both pubkeys let mut found_accounts = HashSet::new(); - accounts.accounts_index.index_scan_accounts( - &HashMap::new(), - IndexKey::SplTokenMint(mint_key), - |key, _| { + let index_key = IndexKey::SplTokenMint(mint_key); + accounts + .accounts_index + .index_scan_accounts(&HashMap::new(), index_key, |key, _| { found_accounts.insert(*key); - }, - ); + }); assert_eq!(found_accounts.len(), 2); assert!(found_accounts.contains(&pubkey1)); assert!(found_accounts.contains(&pubkey2)); + { + accounts.account_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { + exclude: true, + keys: [mint_key].iter().cloned().collect::>(), + }); + // Secondary index can't be used - do normal scan: should still find both pubkeys + let found_accounts = accounts.index_scan_accounts( + &Ancestors::default(), + index_key, + |collection: &mut HashSet, account| { + collection.insert(*account.unwrap().0); + }, + ); + assert!(!found_accounts.1); + assert_eq!(found_accounts.0.len(), 2); + assert!(found_accounts.0.contains(&pubkey1)); + assert!(found_accounts.0.contains(&pubkey2)); + + accounts.account_indexes.keys = None; + + // Secondary index can now be used since it isn't marked as excluded + let found_accounts = accounts.index_scan_accounts( + &Ancestors::default(), + index_key, + |collection: &mut HashSet, account| { + collection.insert(*account.unwrap().0); + }, + ); + assert!(found_accounts.1); + assert_eq!(found_accounts.0.len(), 2); + assert!(found_accounts.0.contains(&pubkey1)); + assert!(found_accounts.0.contains(&pubkey2)); + + accounts.account_indexes.keys = None; + } + accounts.clean_accounts(None); //both zero lamport and normal accounts are cleaned up @@ -6616,8 +6140,8 @@ pub mod tests { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); - let zero_account = Account::new(0, 0, 
&Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); + let zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); // store an account, make it a zero lamport account // in slot 1 @@ -6653,7 +6177,7 @@ pub mod tests { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); //store an account accounts.store_uncached(0, &[(&pubkey, &account)]); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); @@ -6711,7 +6235,7 @@ pub mod tests { modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3); // Overwrite account 30 from slot 0 with lamports=0 into slot 1. // Slot 1 should now have 10 + 1 = 11 accounts - let account = Account::new(0, 0, &Account::default().owner); + let account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); accounts.store_uncached(latest_slot, &[(&pubkeys[30], &account)]); // Create 10 new accounts in slot 1, should now have 11 + 10 = 21 @@ -6731,7 +6255,7 @@ pub mod tests { accounts.clean_accounts(None); // Overwrite account 31 from slot 0 with lamports=0 into slot 2. // Slot 2 should now have 20 + 1 = 21 accounts - let account = Account::new(0, 0, &Account::default().owner); + let account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); accounts.store_uncached(latest_slot, &[(&pubkeys[31], &account)]); // Create 10 new accounts in slot 2. 
Slot 2 should now have @@ -6827,15 +6351,15 @@ pub mod tests { let some_lamport = 223; let zero_lamport = 0; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let pubkey = solana_sdk::pubkey::new_rand(); - let account2 = Account::new(some_lamport, no_data, &owner); + let account2 = AccountSharedData::new(some_lamport, no_data, &owner); let pubkey2 = solana_sdk::pubkey::new_rand(); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let accounts = AccountsDb::new_single(); accounts.add_root(0); @@ -6906,12 +6430,12 @@ pub mod tests { let some_lamport = 223; let zero_lamport = 0; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let pubkey = solana_sdk::pubkey::new_rand(); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let accounts = AccountsDb::new_single(); accounts.add_root(0); @@ -6966,16 +6490,16 @@ pub mod tests { let some_lamport = 223; let zero_lamport = 0; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let pubkey = solana_sdk::pubkey::new_rand(); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); - let account2 = Account::new(some_lamport + 1, no_data, &owner); + let account2 = 
AccountSharedData::new(some_lamport + 1, no_data, &owner); let pubkey2 = solana_sdk::pubkey::new_rand(); - let filler_account = Account::new(some_lamport, no_data, &owner); + let filler_account = AccountSharedData::new(some_lamport, no_data, &owner); let filler_account_pubkey = solana_sdk::pubkey::new_rand(); let accounts = AccountsDb::new_single(); @@ -7024,18 +6548,18 @@ pub mod tests { let zero_lamport = 0; let dummy_lamport = 999; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); - let account2 = Account::new(some_lamport + 100_001, no_data, &owner); - let account3 = Account::new(some_lamport + 100_002, no_data, &owner); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); + let account2 = AccountSharedData::new(some_lamport + 100_001, no_data, &owner); + let account3 = AccountSharedData::new(some_lamport + 100_002, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let pubkey = solana_sdk::pubkey::new_rand(); let purged_pubkey1 = solana_sdk::pubkey::new_rand(); let purged_pubkey2 = solana_sdk::pubkey::new_rand(); - let dummy_account = Account::new(dummy_lamport, no_data, &owner); + let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner); let dummy_pubkey = Pubkey::default(); let accounts = AccountsDb::new_single(); @@ -7114,7 +6638,7 @@ pub mod tests { .name("account-writers".to_string()) .spawn(move || { let pubkey = solana_sdk::pubkey::new_rand(); - let mut account = Account::new(1, 0, &pubkey); + let mut account = AccountSharedData::new(1, 0, &pubkey); let mut i = 0; loop { let account_bal = thread_rng().gen_range(1, 99); @@ -7145,26 +6669,32 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let key0 = 
solana_sdk::pubkey::new_rand(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(0, &[(&key0, &account0)]); let key1 = solana_sdk::pubkey::new_rand(); - let account1 = Account::new(2, 0, &key); + let account1 = AccountSharedData::new(2, 0, &key); db.store_uncached(1, &[(&key1, &account1)]); let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts: Vec = - db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec, option| { + let accounts: Vec = db.unchecked_scan_accounts( + "", + &ancestors, + |accounts: &mut Vec, option| { accounts.push(option.1.account()); - }); + }, + ); assert_eq!(accounts, vec![account0]); let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); - let accounts: Vec = - db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec, option| { + let accounts: Vec = db.unchecked_scan_accounts( + "", + &ancestors, + |accounts: &mut Vec, option| { accounts.push(option.1.account()); - }); + }, + ); assert_eq!(accounts.len(), 2); } @@ -7175,12 +6705,12 @@ pub mod tests { let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); db.store_uncached(0, &[(&key0, &account0)]); let key1 = solana_sdk::pubkey::new_rand(); - let account1 = Account::new(2, 0, &key); + let account1 = AccountSharedData::new(2, 0, &key); db.store_uncached(1, &[(&key1, &account1)]); db.print_accounts_stats("pre"); @@ -7189,7 +6719,7 @@ pub mod tests { let purge_keys = vec![(key1, slots)]; db.purge_keys_exact(&purge_keys); - let account2 = Account::new(3, 0, &key); + let account2 = AccountSharedData::new(3, 0, &key); db.store_uncached(2, &[(&key1, &account2)]); db.print_accounts_stats("post"); @@ -7204,18 +6734,18 @@ pub mod tests { let key = Pubkey::default(); let data_len = DEFAULT_FILE_SIZE as usize + 7; - let account = Account::new(1, data_len, &key); + let account = 
AccountSharedData::new(1, data_len, &key); db.store_uncached(0, &[(&key, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); let ret = db.load_slow(&ancestors, &key).unwrap(); - assert_eq!(ret.0.data.len(), data_len); + assert_eq!(ret.0.data().len(), data_len); } #[test] fn test_hash_frozen_account_data() { - let account = Account::new(1, 42, &Pubkey::default()); + let account = AccountSharedData::new(1, 42, &Pubkey::default()); let hash = AccountsDb::hash_frozen_account_data(&account); assert_ne!(hash, Hash::default()); // Better not be the default Hash @@ -7238,7 +6768,7 @@ pub mod tests { // Account data may not be modified let mut account_modified = account.clone(); - account_modified.data[0] = 42; + account_modified.data_as_mut_slice()[0] = 42; assert_ne!( hash, AccountsDb::hash_frozen_account_data(&account_modified) @@ -7268,7 +6798,7 @@ pub mod tests { Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); - let mut account = Account::new(1, 42, &frozen_pubkey); + let mut account = AccountSharedData::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -7303,7 +6833,7 @@ pub mod tests { Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); - let mut account = Account::new(1, 42, &frozen_pubkey); + let mut account = AccountSharedData::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -7336,13 +6866,13 @@ pub mod tests { Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); - let mut account = Account::new(1, 42, &frozen_pubkey); + let mut account = AccountSharedData::new(1, 42, &frozen_pubkey); db.store_uncached(0, 
&[(&frozen_pubkey, &account)]); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); - account.data[0] = 42; + account.data_as_mut_slice()[0] = 42; db.store_uncached(0, &[(&frozen_pubkey, &account)]); } @@ -7412,7 +6942,7 @@ pub mod tests { let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; - let account = Account::new(1, some_data_len, &key); + let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store_uncached(some_slot, &[(&key, &account)]); @@ -7440,7 +6970,7 @@ pub mod tests { let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; let some_slot: Slot = 0; - let account = Account::new(1, some_data_len, &key); + let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store_uncached(some_slot, &[(&key, &account)]); @@ -7482,7 +7012,7 @@ pub mod tests { let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; let some_slot: Slot = 0; - let account = Account::new(1, some_data_len, &key); + let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); db.store_uncached(some_slot, &[(&key, &account)]); @@ -7542,7 +7072,7 @@ pub mod tests { let key = Pubkey::default(); let some_data_len = 0; let some_slot: Slot = 0; - let account = Account::new(1, some_data_len, &key); + let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); let accounts = &[(&key, &account)]; @@ -7558,64 +7088,6 @@ pub mod tests { ); } - #[test] - fn test_bad_bank_hash() { - solana_logger::setup(); - use solana_sdk::signature::{Keypair, Signer}; - let db = AccountsDb::new(Vec::new(), &ClusterType::Development); - - let some_slot: Slot = 0; - let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect(); - - let max_accounts = 200; - let 
mut accounts_keys: Vec<_> = (0..max_accounts) - .into_par_iter() - .map(|_| { - let key = Keypair::new().pubkey(); - let lamports = thread_rng().gen_range(0, 100); - let some_data_len = thread_rng().gen_range(0, 1000); - let account = Account::new(lamports, some_data_len, &key); - (key, account) - }) - .collect(); - - let mut existing = HashSet::new(); - let mut last_print = Instant::now(); - for i in 0..5_000 { - if last_print.elapsed().as_millis() > 5000 { - info!("i: {}", i); - last_print = Instant::now(); - } - let num_accounts = thread_rng().gen_range(0, 100); - (0..num_accounts).into_iter().for_each(|_| { - let mut idx; - loop { - idx = thread_rng().gen_range(0, max_accounts); - if existing.contains(&idx) { - continue; - } - existing.insert(idx); - break; - } - accounts_keys[idx].1.lamports = thread_rng().gen_range(0, 1000); - }); - - let account_refs: Vec<_> = existing - .iter() - .map(|idx| (&accounts_keys[*idx].0, &accounts_keys[*idx].1)) - .collect(); - db.store(some_slot, &account_refs, false); - - for (key, account) in &account_refs { - assert_eq!( - db.load_account_hash(&ancestors, &key), - AccountsDb::hash_account(some_slot, &account, &key, &ClusterType::Development) - ); - } - existing.clear(); - } - } - #[test] fn test_storage_finder() { solana_logger::setup(); @@ -7623,7 +7095,7 @@ pub mod tests { let key = solana_sdk::pubkey::new_rand(); let lamports = 100; let data_len = 8190; - let account = Account::new(lamports, data_len, &solana_sdk::pubkey::new_rand()); + let account = AccountSharedData::new(lamports, data_len, &solana_sdk::pubkey::new_rand()); // pre-populate with a smaller empty store db.create_and_insert_store(1, 8192, "test_storage_finder"); db.store_uncached(1, &[(&key, &account)]); @@ -7640,7 +7112,7 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account = Account::new(1, 0, &key); + let account = AccountSharedData::new(1, 0, &key); let before_slot = 0; let 
base_slot = before_slot + 1; let after_slot = base_slot + 1; @@ -7658,7 +7130,7 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account = Account::new(1, 0, &key); + let account = AccountSharedData::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; @@ -7681,7 +7153,7 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account = Account::new(1, 0, &key); + let account = AccountSharedData::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; @@ -7697,7 +7169,7 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); - let account = Account::new(1, 0, &key); + let account = AccountSharedData::new(1, 0, &key); let base_slot = 0; let after_slot = base_slot + 1; @@ -7722,7 +7194,7 @@ pub mod tests { fn test_storage_remove_account_double_remove() { let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); - let account = Account::new(1, 0, &Account::default().owner); + let account = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); accounts.store_uncached(0, &[(&pubkey, &account)]); let storage_entry = accounts .storage @@ -7744,13 +7216,13 @@ pub mod tests { let old_lamport = 223; let zero_lamport = 0; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(old_lamport, no_data, &owner); - let account2 = Account::new(old_lamport + 100_001, no_data, &owner); - let account3 = Account::new(old_lamport + 100_002, no_data, &owner); - let dummy_account = Account::new(99_999_999, no_data, &owner); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let account = AccountSharedData::new(old_lamport, no_data, &owner); + let account2 = AccountSharedData::new(old_lamport + 100_001, 
no_data, &owner); + let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner); + let dummy_account = AccountSharedData::new(99_999_999, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let pubkey = solana_sdk::pubkey::new_rand(); let dummy_pubkey = solana_sdk::pubkey::new_rand(); @@ -7811,13 +7283,13 @@ pub mod tests { // size data so only 1 fits in a 4k store let data_size = 2200; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(old_lamport, data_size, &owner); - let account2 = Account::new(old_lamport + 100_001, data_size, &owner); - let account3 = Account::new(old_lamport + 100_002, data_size, &owner); - let account4 = Account::new(dummy_lamport, data_size, &owner); - let zero_lamport_account = Account::new(zero_lamport, data_size, &owner); + let account = AccountSharedData::new(old_lamport, data_size, &owner); + let account2 = AccountSharedData::new(old_lamport + 100_001, data_size, &owner); + let account3 = AccountSharedData::new(old_lamport + 100_002, data_size, &owner); + let account4 = AccountSharedData::new(dummy_lamport, data_size, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, data_size, &owner); let mut current_slot = 0; let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size); @@ -7935,13 +7407,13 @@ pub mod tests { let zero_lamport = 0; let no_data = 0; let dummy_lamport = 999_999; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(old_lamport, no_data, &owner); - let account2 = Account::new(old_lamport + 100_001, no_data, &owner); - let account3 = Account::new(old_lamport + 100_002, no_data, &owner); - let dummy_account = Account::new(dummy_lamport, no_data, &owner); - let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); + let account = AccountSharedData::new(old_lamport, 
no_data, &owner); + let account2 = AccountSharedData::new(old_lamport + 100_001, no_data, &owner); + let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner); + let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner); + let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); @@ -8138,9 +7610,9 @@ pub mod tests { let some_lamport = 223; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let mut current_slot = 0; @@ -8206,9 +7678,9 @@ pub mod tests { let some_lamport = 223; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let mut current_slot = 0; @@ -8266,9 +7738,9 @@ pub mod tests { let some_lamport = 223; let no_data = 0; - let owner = Account::default().owner; + let owner = AccountSharedData::default().owner; - let account = Account::new(some_lamport, no_data, &owner); + let account = AccountSharedData::new(some_lamport, no_data, &owner); let mut current_slot = 0; @@ -8349,7 +7821,7 @@ pub mod tests { &key0, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), info0, &mut reclaims, ); @@ -8358,7 +7830,7 @@ pub mod tests { &key0, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), info1.clone(), &mut reclaims, ); @@ -8367,7 +7839,7 @@ pub mod tests { &key1, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), info1, &mut reclaims, ); @@ -8376,7 +7848,7 @@ pub mod tests { &key1, &Pubkey::default(), &[], - &HashSet::new(), + 
&AccountSecondaryIndexes::default(), info2.clone(), &mut reclaims, ); @@ -8385,7 +7857,7 @@ pub mod tests { &key2, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), info2, &mut reclaims, ); @@ -8394,7 +7866,7 @@ pub mod tests { &key2, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), info3, &mut reclaims, ); @@ -8436,60 +7908,6 @@ pub mod tests { } } - #[test] - #[ignore] - fn test_shrink_and_clean() { - solana_logger::setup(); - - // repeat the whole test scenario - for _ in 0..5 { - let accounts = Arc::new(AccountsDb::new_single()); - let accounts_for_shrink = accounts.clone(); - - // spawn the slot shrinking background thread - let exit = Arc::new(AtomicBool::default()); - let exit_for_shrink = exit.clone(); - let shrink_thread = std::thread::spawn(move || loop { - if exit_for_shrink.load(Ordering::Relaxed) { - break; - } - accounts_for_shrink.process_stale_slot_v1(); - }); - - let mut alive_accounts = vec![]; - let owner = Pubkey::default(); - - // populate the AccountsDb with plenty of food for slot shrinking - // also this simulates realistic some heavy spike account updates in the wild - for current_slot in 0..1000 { - while alive_accounts.len() <= 10 { - alive_accounts.push(( - solana_sdk::pubkey::new_rand(), - Account::new(thread_rng().gen_range(0, 50), 0, &owner), - )); - } - - alive_accounts.retain(|(_pubkey, account)| account.lamports >= 1); - - for (pubkey, account) in alive_accounts.iter_mut() { - account.lamports -= 1; - accounts.store(current_slot, &[(&pubkey, &account)], false); - } - accounts.add_root(current_slot); - } - - // let's dance. 
- for _ in 0..10 { - accounts.clean_accounts(None); - std::thread::sleep(std::time::Duration::from_millis(100)); - } - - // cleanup - exit.store(true, Ordering::Relaxed); - shrink_thread.join().unwrap(); - } - } - #[test] fn test_account_balance_for_capitalization_normal() { // system accounts @@ -8574,7 +7992,7 @@ pub mod tests { fn test_store_overhead() { solana_logger::setup(); let accounts = AccountsDb::new_single(); - let account = Account::default(); + let account = AccountSharedData::default(); let pubkey = solana_sdk::pubkey::new_rand(); accounts.store_uncached(0, &[(&pubkey, &account)]); let slot_stores = accounts.storage.get_slot_stores(0).unwrap(); @@ -8595,7 +8013,7 @@ pub mod tests { let num_accounts: usize = 100; let mut keys = Vec::new(); for i in 0..num_accounts { - let account = Account::new((i + 1) as u64, size, &Pubkey::default()); + let account = AccountSharedData::new((i + 1) as u64, size, &Pubkey::default()); let pubkey = solana_sdk::pubkey::new_rand(); accounts.store_uncached(0, &[(&pubkey, &account)]); keys.push(pubkey); @@ -8603,7 +8021,8 @@ pub mod tests { accounts.add_root(0); for (i, key) in keys[1..].iter().enumerate() { - let account = Account::new((1 + i + num_accounts) as u64, size, &Pubkey::default()); + let account = + AccountSharedData::new((1 + i + num_accounts) as u64, size, &Pubkey::default()); accounts.store_uncached(1, &[(key, &account)]); } accounts.add_root(1); @@ -8616,7 +8035,7 @@ pub mod tests { let mut account_refs = Vec::new(); let num_to_store = 20; for (i, key) in keys[..num_to_store].iter().enumerate() { - let account = Account::new( + let account = AccountSharedData::new( (1 + i + 2 * num_accounts) as u64, i + 20, &Pubkey::default(), @@ -8640,7 +8059,8 @@ pub mod tests { fn test_zero_lamport_new_root_not_cleaned() { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key = Pubkey::new_unique(); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); + let 
zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); // Store zero lamport account into slots 0 and 1, root both slots db.store_uncached(0, &[(&account_key, &zero_lamport_account)]); @@ -8665,7 +8085,7 @@ pub mod tests { let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; db.store_cached(slot, &[(&key, &account0)]); @@ -8693,7 +8113,7 @@ pub mod tests { let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); - let account0 = Account::new(1, 0, &key); + let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; db.store_cached(slot, &[(&key, &account0)]); db.mark_slot_frozen(slot); @@ -8717,7 +8137,7 @@ pub mod tests { fn test_flush_accounts_cache() { let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; - let account0 = Account::new(1, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); let unrooted_slot = 4; let root5 = 5; @@ -8776,7 +8196,7 @@ pub mod tests { fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) { let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; - let account0 = Account::new(1, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); let mut keys = vec![]; let num_slots = 2 * MAX_CACHE_SLOTS; for i in 0..num_roots + num_unrooted { @@ -8835,13 +8255,14 @@ pub mod tests { let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), caching_enabled, )); let account_key = Pubkey::new_unique(); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); - let slot1_account = 
Account::new(1, 1, &Account::default().owner); + let zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); + let slot1_account = AccountSharedData::new(1, 1, &AccountSharedData::default().owner); db.store_cached(0, &[(&account_key, &zero_lamport_account)]); db.store_cached(1, &[(&account_key, &slot1_account)]); @@ -8870,7 +8291,7 @@ pub mod tests { let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), caching_enabled, )); @@ -8878,8 +8299,10 @@ pub mod tests { let other_account_key = Pubkey::new_unique(); let original_lamports = 1; - let slot0_account = Account::new(original_lamports, 1, &Account::default().owner); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); + let slot0_account = + AccountSharedData::new(original_lamports, 1, &AccountSharedData::default().owner); + let zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); // Store into slot 0, and then flush the slot to storage db.store_cached(0, &[(&zero_lamport_account_key, &slot0_account)]); @@ -8959,7 +8382,7 @@ pub mod tests { .spawn(move || { db.scan_accounts( &scan_ancestors, - |_collector: &mut Vec<(Pubkey, Account)>, maybe_account| { + |_collector: &mut Vec<(Pubkey, AccountSharedData)>, maybe_account| { ready_.store(true, Ordering::Relaxed); if let Some((pubkey, _, _)) = maybe_account { if *pubkey == stall_key { @@ -8991,14 +8414,15 @@ pub mod tests { let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), caching_enabled, )); let account_key = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); - let zero_lamport_account = Account::new(0, 0, &Account::default().owner); - let slot1_account = Account::new(1, 1, &Account::default().owner); - let slot2_account = Account::new(2, 1, &Account::default().owner); + let 
zero_lamport_account = + AccountSharedData::new(0, 0, &AccountSharedData::default().owner); + let slot1_account = AccountSharedData::new(1, 1, &AccountSharedData::default().owner); + let slot2_account = AccountSharedData::new(2, 1, &AccountSharedData::default().owner); /* Store zero lamport account into slots 0, 1, 2 where @@ -9074,14 +8498,14 @@ pub mod tests { let accounts_db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), caching_enabled, ); let slot: Slot = 0; let num_keys = 10; for data_size in 0..num_keys { - let account = Account::new(1, data_size, &Pubkey::default()); + let account = AccountSharedData::new(1, data_size, &Pubkey::default()); accounts_db.store_cached(slot, &[(&Pubkey::new_unique(), &account)]); } @@ -9128,7 +8552,7 @@ pub mod tests { let accounts_db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), caching_enabled, )); let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect(); @@ -9141,7 +8565,10 @@ pub mod tests { accounts_db.store_cached( // Store it in a slot that isn't returned in `slots` stall_slot, - &[(&scan_stall_key, &Account::new(1, 0, &Pubkey::default()))], + &[( + &scan_stall_key, + &AccountSharedData::new(1, 0, &Pubkey::default()), + )], ); } @@ -9149,7 +8576,10 @@ pub mod tests { let mut scan_tracker = None; for slot in &slots { for key in &keys[*slot as usize..] 
{ - accounts_db.store_cached(*slot, &[(key, &Account::new(1, 0, &Pubkey::default()))]); + accounts_db.store_cached( + *slot, + &[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))], + ); } accounts_db.add_root(*slot as Slot); if Some(*slot) == scan_slot { @@ -9185,7 +8615,7 @@ pub mod tests { // Store a slot that overwrites all previous keys, rendering all previous keys dead accounts_db.store_cached( alive_slot, - &[(key, &Account::new(1, 0, &Pubkey::default()))], + &[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))], ); accounts_db.add_root(alive_slot); } @@ -9510,12 +8940,12 @@ pub mod tests { let db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, - HashSet::default(), + AccountSecondaryIndexes::default(), caching_enabled, ); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); - let account1 = Account::new(1, 0, &Account::default().owner); + let account1 = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); // Store into slot 0 db.store_cached(0, &[(&account_key1, &account1)]); @@ -9593,10 +9023,10 @@ pub mod tests { let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); - let account1 = Account::new(1, 0, &Account::default().owner); - let account2 = Account::new(2, 0, &Account::default().owner); - let account3 = Account::new(3, 0, &Account::default().owner); - let account4 = Account::new(4, 0, &Account::default().owner); + let account1 = AccountSharedData::new(1, 0, &AccountSharedData::default().owner); + let account2 = AccountSharedData::new(2, 0, &AccountSharedData::default().owner); + let account3 = AccountSharedData::new(3, 0, &AccountSharedData::default().owner); + let account4 = AccountSharedData::new(4, 0, &AccountSharedData::default().owner); // Store accounts into slots 0 and 1 db.store_uncached(0, &[(&account_key1, &account1)]); diff --git a/runtime/src/accounts_hash.rs 
b/runtime/src/accounts_hash.rs new file mode 100644 index 0000000000..c9a2e48bee --- /dev/null +++ b/runtime/src/accounts_hash.rs @@ -0,0 +1,1557 @@ +use log::*; +use rayon::prelude::*; +use solana_measure::measure::Measure; +use solana_sdk::{ + clock::Slot, + hash::{Hash, Hasher}, + pubkey::Pubkey, +}; +use std::{convert::TryInto, sync::Mutex}; + +pub const ZERO_RAW_LAMPORTS_SENTINEL: u64 = std::u64::MAX; +pub const MERKLE_FANOUT: usize = 16; + +#[derive(Debug, Default)] +pub struct HashStats { + pub scan_time_total_us: u64, + pub zeros_time_total_us: u64, + pub hash_time_total_us: u64, + pub sort_time_total_us: u64, + pub flatten_time_total_us: u64, + pub pre_scan_flatten_time_total_us: u64, + pub hash_total: usize, + pub unreduced_entries: usize, + pub num_snapshot_storage: usize, +} +impl HashStats { + fn log(&mut self) { + let total_time_us = self.scan_time_total_us + + self.zeros_time_total_us + + self.hash_time_total_us + + self.sort_time_total_us + + self.flatten_time_total_us + + self.pre_scan_flatten_time_total_us; + datapoint_info!( + "calculate_accounts_hash_without_index", + ("accounts_scan", self.scan_time_total_us, i64), + ("eliminate_zeros", self.zeros_time_total_us, i64), + ("hash", self.hash_time_total_us, i64), + ("sort", self.sort_time_total_us, i64), + ("hash_total", self.hash_total, i64), + ("flatten", self.flatten_time_total_us, i64), + ("unreduced_entries", self.unreduced_entries as i64, i64), + ( + "num_snapshot_storage", + self.num_snapshot_storage as i64, + i64 + ), + ( + "pre_scan_flatten", + self.pre_scan_flatten_time_total_us as i64, + i64 + ), + ("total", total_time_us as i64, i64), + ); + } +} + +#[derive(Default, Debug, PartialEq, Clone)] +pub struct CalculateHashIntermediate { + pub version: u64, + pub hash: Hash, + pub lamports: u64, + pub slot: Slot, + pub pubkey: Pubkey, +} + +impl CalculateHashIntermediate { + pub fn new(version: u64, hash: Hash, lamports: u64, slot: Slot, pubkey: Pubkey) -> Self { + Self { + version, + hash, + 
lamports, + slot, + pubkey, + } + } +} + +#[derive(Default, Debug)] +pub struct CumulativeOffset { + pub index: Vec, + pub start_offset: usize, +} + +impl CumulativeOffset { + pub fn new(index: Vec, start_offset: usize) -> CumulativeOffset { + Self { + index, + start_offset, + } + } +} + +// Allow retrieving &[start..end] from a logical src: Vec, where src is really Vec> (or later Vec>>) +// This model prevents callers from having to flatten which saves both working memory and time. +#[derive(Default, Debug)] +pub struct CumulativeOffsets { + cumulative_offsets: Vec, + total_count: usize, +} + +impl CumulativeOffsets { + pub fn from_raw(raw: &[Vec]) -> CumulativeOffsets { + let mut total_count: usize = 0; + let cumulative_offsets: Vec<_> = raw + .iter() + .enumerate() + .filter_map(|(i, v)| { + let len = v.len(); + if len > 0 { + let result = CumulativeOffset::new(vec![i], total_count); + total_count += len; + Some(result) + } else { + None + } + }) + .collect(); + + Self { + cumulative_offsets, + total_count, + } + } + + pub fn from_raw_2d(raw: &[Vec>]) -> CumulativeOffsets { + let mut total_count: usize = 0; + let mut cumulative_offsets = Vec::with_capacity(0); + for (i, v_outer) in raw.iter().enumerate() { + for (j, v) in v_outer.iter().enumerate() { + let len = v.len(); + if len > 0 { + if cumulative_offsets.is_empty() { + // the first inner, non-empty vector we find gives us an approximate rectangular shape + cumulative_offsets = Vec::with_capacity(raw.len() * v_outer.len()); + } + cumulative_offsets.push(CumulativeOffset::new(vec![i, j], total_count)); + total_count += len; + } + } + } + + Self { + cumulative_offsets, + total_count, + } + } + + // return the biggest slice possible that starts at 'start' + pub fn get_slice<'a, T>(&self, raw: &'a [Vec], start: usize) -> &'a [T] { + // This could be binary search, but we expect a small number of vectors. 
+ for i in (0..self.cumulative_offsets.len()).into_iter().rev() { + let index = &self.cumulative_offsets[i]; + if start >= index.start_offset { + let start = start - index.start_offset; + const DIMENSION: usize = 0; + return &raw[index.index[DIMENSION]][start..]; + } + } + panic!( + "get_slice didn't find: {}, len: {}", + start, self.total_count + ); + } + + // return the biggest slice possible that starts at 'start' + pub fn get_slice_2d<'a, T>(&self, raw: &'a [Vec>], start: usize) -> &'a [T] { + // This could be binary search, but we expect a small number of vectors. + for i in (0..self.cumulative_offsets.len()).into_iter().rev() { + let index = &self.cumulative_offsets[i]; + if start >= index.start_offset { + let start = start - index.start_offset; + const DIMENSION_0: usize = 0; + const DIMENSION_1: usize = 1; + return &raw[index.index[DIMENSION_0]][index.index[DIMENSION_1]][start..]; + } + } + panic!( + "get_slice didn't find: {}, len: {}", + start, self.total_count + ); + } +} + +#[derive(Debug)] +pub struct AccountsHash { + pub dummy: i32, +} + +impl AccountsHash { + pub fn calculate_hash(hashes: Vec>) -> (Hash, usize) { + let cumulative_offsets = CumulativeOffsets::from_raw(&hashes); + + let hash_total = cumulative_offsets.total_count; + let result = AccountsHash::compute_merkle_root_from_slices( + hash_total, + MERKLE_FANOUT, + None, + |start: usize| cumulative_offsets.get_slice(&hashes, start), + ); + (result, hash_total) + } + + pub fn compute_merkle_root(hashes: Vec<(Pubkey, Hash)>, fanout: usize) -> Hash { + Self::compute_merkle_root_loop(hashes, fanout, |t| t.1) + } + + // this function avoids an infinite recursion compiler error + pub fn compute_merkle_root_recurse(hashes: Vec, fanout: usize) -> Hash { + Self::compute_merkle_root_loop(hashes, fanout, |t: &Hash| *t) + } + + pub fn div_ceil(x: usize, y: usize) -> usize { + let mut result = x / y; + if x % y != 0 { + result += 1; + } + result + } + + // For the first iteration, there could be more items 
in the tuple than just hash and lamports. + // Using extractor allows us to avoid an unnecessary array copy on the first iteration. + pub fn compute_merkle_root_loop(hashes: Vec, fanout: usize, extractor: F) -> Hash + where + F: Fn(&T) -> Hash + std::marker::Sync, + T: std::marker::Sync, + { + if hashes.is_empty() { + return Hasher::default().result(); + } + + let mut time = Measure::start("time"); + + let total_hashes = hashes.len(); + let chunks = Self::div_ceil(total_hashes, fanout); + + let result: Vec<_> = (0..chunks) + .into_par_iter() + .map(|i| { + let start_index = i * fanout; + let end_index = std::cmp::min(start_index + fanout, total_hashes); + + let mut hasher = Hasher::default(); + for item in hashes.iter().take(end_index).skip(start_index) { + let h = extractor(&item); + hasher.hash(h.as_ref()); + } + + hasher.result() + }) + .collect(); + time.stop(); + debug!("hashing {} {}", total_hashes, time); + + if result.len() == 1 { + result[0] + } else { + Self::compute_merkle_root_recurse(result, fanout) + } + } + + // This function is designed to allow hashes to be located in multiple, perhaps multiply deep vecs. + // The caller provides a function to return a slice from the source data. + pub fn compute_merkle_root_from_slices<'a, F>( + total_hashes: usize, + fanout: usize, + max_levels_per_pass: Option, + get_hashes: F, + ) -> Hash + where + F: Fn(usize) -> &'a [Hash] + std::marker::Sync, + { + if total_hashes == 0 { + return Hasher::default().result(); + } + + let mut time = Measure::start("time"); + + const THREE_LEVEL_OPTIMIZATION: usize = 3; // this '3' is dependent on the code structure below where we manually unroll + let target = fanout.pow(THREE_LEVEL_OPTIMIZATION as u32); + + // Only use the 3 level optimization if we have at least 4 levels of data. + // Otherwise, we'll be serializing a parallel operation. 
+ let threshold = target * fanout; + let three_level = max_levels_per_pass.unwrap_or(usize::MAX) >= THREE_LEVEL_OPTIMIZATION + && total_hashes >= threshold; + let num_hashes_per_chunk = if three_level { target } else { fanout }; + + let chunks = Self::div_ceil(total_hashes, num_hashes_per_chunk); + + // initial fetch - could return entire slice + let data: &[Hash] = get_hashes(0); + let data_len = data.len(); + + let result: Vec<_> = (0..chunks) + .into_par_iter() + .map(|i| { + let start_index = i * num_hashes_per_chunk; + let end_index = std::cmp::min(start_index + num_hashes_per_chunk, total_hashes); + + let mut hasher = Hasher::default(); + let mut data_index = start_index; + let mut data = data; + let mut data_len = data_len; + + if !three_level { + // 1 group of fanout + // The result of this loop is a single hash value from fanout input hashes. + for i in start_index..end_index { + if data_index >= data_len { + // fetch next slice + data = get_hashes(i); + data_len = data.len(); + data_index = 0; + } + hasher.hash(data[data_index].as_ref()); + data_index += 1; + } + } else { + // hash 3 levels of fanout simultaneously. + // The result of this loop is a single hash value from fanout^3 input hashes. 
+ let mut i = start_index; + while i < end_index { + let mut hasher_j = Hasher::default(); + for _j in 0..fanout { + let mut hasher_k = Hasher::default(); + let end = std::cmp::min(end_index - i, fanout); + for _k in 0..end { + if data_index >= data_len { + // fetch next slice + data = get_hashes(i); + data_len = data.len(); + data_index = 0; + } + hasher_k.hash(data[data_index].as_ref()); + data_index += 1; + i += 1; + } + hasher_j.hash(hasher_k.result().as_ref()); + if i >= end_index { + break; + } + } + hasher.hash(hasher_j.result().as_ref()); + } + } + + hasher.result() + }) + .collect(); + time.stop(); + debug!("hashing {} {}", total_hashes, time); + + if result.len() == 1 { + result[0] + } else { + Self::compute_merkle_root_recurse(result, fanout) + } + } + + pub fn accumulate_account_hashes(mut hashes: Vec<(Pubkey, Hash)>) -> Hash { + Self::sort_hashes_by_pubkey(&mut hashes); + + Self::compute_merkle_root_loop(hashes, MERKLE_FANOUT, |i| i.1) + } + + pub fn sort_hashes_by_pubkey(hashes: &mut Vec<(Pubkey, Hash)>) { + hashes.par_sort_unstable_by(|a, b| a.0.cmp(&b.0)); + } + + fn flatten_hash_intermediate( + data_sections_by_pubkey: Vec>>, + stats: &mut HashStats, + ) -> Vec> + where + T: Clone, + { + // flatten this: + // vec: just a level of hierarchy + // vec: 1 vec per PUBKEY_BINS_FOR_CALCULATING_HASHES + // vec: Intermediate data whose pubkey belongs in this division + // into this: + // vec: 1 vec per PUBKEY_BINS_FOR_CALCULATING_HASHES + // vec: Intermediate data whose pubkey belongs in this division + let mut flatten_time = Measure::start("flatten"); + let mut data_by_pubkey: Vec> = vec![]; + let mut raw_len = 0; + for mut outer in data_sections_by_pubkey { + let outer_len = outer.len(); + for pubkey_index in 0..outer_len { + let this_len = outer[pubkey_index].len(); + if this_len == 0 { + continue; + } + raw_len += this_len; + let mut data = vec![]; + std::mem::swap(&mut data, &mut outer[pubkey_index]); + + if data_by_pubkey.len() <= pubkey_index { + 
data_by_pubkey.extend(vec![vec![]; pubkey_index - data_by_pubkey.len() + 1]); + } + + data_by_pubkey[pubkey_index].extend(data); + } + } + flatten_time.stop(); + stats.flatten_time_total_us += flatten_time.as_us(); + stats.unreduced_entries = raw_len; + data_by_pubkey + } + + pub fn compare_two_hash_entries( + a: &CalculateHashIntermediate, + b: &CalculateHashIntermediate, + ) -> std::cmp::Ordering { + // note partial_cmp only returns None with floating point comparisons + match a.pubkey.partial_cmp(&b.pubkey).unwrap() { + std::cmp::Ordering::Equal => match b.slot.partial_cmp(&a.slot).unwrap() { + std::cmp::Ordering::Equal => b.version.partial_cmp(&a.version).unwrap(), + other => other, + }, + other => other, + } + } + + fn sort_hash_intermediate( + data_by_pubkey: Vec>, + stats: &mut HashStats, + ) -> Vec> { + // sort each PUBKEY_DIVISION vec + let mut sort_time = Measure::start("sort"); + let sorted_data_by_pubkey: Vec> = data_by_pubkey + .into_par_iter() + .map(|mut pk_range| { + pk_range.par_sort_unstable_by(Self::compare_two_hash_entries); + pk_range + }) + .collect(); + sort_time.stop(); + stats.sort_time_total_us += sort_time.as_us(); + sorted_data_by_pubkey + } + + pub fn checked_cast_for_capitalization(balance: u128) -> u64 { + balance + .try_into() + .expect("overflow is detected while summing capitalization") + } + + fn de_dup_and_eliminate_zeros( + sorted_data_by_pubkey: Vec>, + stats: &mut HashStats, + ) -> (Vec>>, u64) { + // 1. eliminate zero lamport accounts + // 2. pick the highest slot or (slot = and highest version) of each pubkey + // 3. 
produce this output: + // vec: PUBKEY_BINS_FOR_CALCULATING_HASHES in pubkey order + // vec: sorted sections from parallelism, in pubkey order + // vec: individual hashes in pubkey order + let mut zeros = Measure::start("eliminate zeros"); + let overall_sum = Mutex::new(0u64); + const CHUNKS: usize = 10; + let hashes: Vec>> = sorted_data_by_pubkey + .into_par_iter() + .map(|pubkey_division| { + let (hashes, sum) = Self::de_dup_accounts_in_parallel(&pubkey_division, CHUNKS); + let mut overall = overall_sum.lock().unwrap(); + *overall = Self::checked_cast_for_capitalization(sum as u128 + *overall as u128); + hashes + }) + .collect(); + zeros.stop(); + stats.zeros_time_total_us += zeros.as_us(); + let sum = *overall_sum.lock().unwrap(); + (hashes, sum) + } + + // 1. eliminate zero lamport accounts + // 2. pick the highest slot or (slot = and highest version) of each pubkey + // 3. produce this output: + // vec: sorted sections from parallelism, in pubkey order + // vec: individual hashes in pubkey order + fn de_dup_accounts_in_parallel( + pubkey_division: &[CalculateHashIntermediate], + chunk_count: usize, + ) -> (Vec>, u64) { + let len = pubkey_division.len(); + let max = if len > chunk_count { + std::cmp::max(chunk_count, 1) + } else { + 1 + }; + let chunk_size = len / max; + let overall_sum = Mutex::new(0u64); + let hashes: Vec> = (0..max) + .into_par_iter() + .map(|chunk_index| { + let mut start_index = chunk_index * chunk_size; + let mut end_index = start_index + chunk_size; + if chunk_index == max - 1 { + end_index = len; + } + + let is_first_slice = chunk_index == 0; + if !is_first_slice { + // note that this causes all regions after region 0 to have 1 item that overlaps with the previous region + start_index -= 1; + } + + let (result, sum) = Self::de_dup_accounts_from_stores( + chunk_index == 0, + &pubkey_division[start_index..end_index], + ); + let mut overall = overall_sum.lock().unwrap(); + *overall = Self::checked_cast_for_capitalization(sum + *overall as 
u128); + + result + }) + .collect(); + + let sum = *overall_sum.lock().unwrap(); + (hashes, sum) + } + + fn de_dup_accounts_from_stores( + is_first_slice: bool, + slice: &[CalculateHashIntermediate], + ) -> (Vec, u128) { + let len = slice.len(); + let mut result: Vec = Vec::with_capacity(len); + + let mut sum: u128 = 0; + if len > 0 { + let mut i = 0; + // look_for_first_key means the first key we find in our slice may be a + // continuation of accounts belonging to a key that started in the last slice. + // so, look_for_first_key=true means we have to find the first key different than + // the first key we encounter in our slice. Note that if this is true, + // our slice begins one index prior to the 'actual' start of our logical range. + let mut look_for_first_key = !is_first_slice; + 'outer: loop { + // at start of loop, item at 'i' is the first entry for a given pubkey - unless look_for_first + let now = &slice[i]; + let last = now.pubkey; + if !look_for_first_key && now.lamports != ZERO_RAW_LAMPORTS_SENTINEL { + // first entry for this key that starts in our slice + result.push(now.hash); + sum += now.lamports as u128; + } + for (k, now) in slice.iter().enumerate().skip(i + 1) { + if now.pubkey != last { + i = k; + look_for_first_key = false; + continue 'outer; + } else { + let prev = &slice[k - 1]; + assert!( + !(prev.slot == now.slot + && prev.version == now.version + && (prev.hash != now.hash || prev.lamports != now.lamports)), + "Conflicting store data. 
Pubkey: {}, Slot: {}, Version: {}, Hashes: {}, {}, Lamports: {}, {}", now.pubkey, now.slot, now.version, prev.hash, now.hash, prev.lamports, now.lamports + ); + } + } + + break; // ran out of items in our slice, so our slice is done + } + } + (result, sum) + } + + fn flatten_hashes_and_hash( + hashes: Vec>>, + fanout: usize, + stats: &mut HashStats, + ) -> Hash { + let mut hash_time = Measure::start("flat2"); + + let offsets = CumulativeOffsets::from_raw_2d(&hashes); + + let get_slice = |start: usize| -> &[Hash] { offsets.get_slice_2d(&hashes, start) }; + let hash = AccountsHash::compute_merkle_root_from_slices( + offsets.total_count, + fanout, + None, + get_slice, + ); + hash_time.stop(); + stats.hash_time_total_us += hash_time.as_us(); + stats.hash_total = offsets.total_count; + + hash + } + + // input: + // vec: unordered, created by parallelism + // vec: [0..bins] - where bins are pubkey ranges + // vec: [..] - items which fin in the containing bin, unordered within this vec + // so, assumption is middle vec is bins sorted by pubkey + pub fn rest_of_hash_calculation( + data_sections_by_pubkey: Vec>>, + mut stats: &mut HashStats, + ) -> (Hash, u64) { + let outer = Self::flatten_hash_intermediate(data_sections_by_pubkey, &mut stats); + + let sorted_data_by_pubkey = Self::sort_hash_intermediate(outer, &mut stats); + + let (hashes, total_lamports) = + Self::de_dup_and_eliminate_zeros(sorted_data_by_pubkey, &mut stats); + + let hash = Self::flatten_hashes_and_hash(hashes, MERKLE_FANOUT, &mut stats); + + stats.log(); + + (hash, total_lamports) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn test_accountsdb_div_ceil() { + assert_eq!(AccountsHash::div_ceil(10, 3), 4); + assert_eq!(AccountsHash::div_ceil(0, 1), 0); + assert_eq!(AccountsHash::div_ceil(0, 5), 0); + assert_eq!(AccountsHash::div_ceil(9, 3), 3); + assert_eq!(AccountsHash::div_ceil(9, 9), 1); + } + + #[test] + #[should_panic(expected = "attempt to divide by 
zero")] + fn test_accountsdb_div_ceil_fail() { + assert_eq!(AccountsHash::div_ceil(10, 0), 0); + } + + #[test] + fn test_accountsdb_rest_of_hash_calculation() { + solana_logger::setup(); + + let mut account_maps: Vec = Vec::new(); + + let key = Pubkey::new(&[11u8; 32]); + let hash = Hash::new(&[1u8; 32]); + let val = CalculateHashIntermediate::new(0, hash, 88, Slot::default(), key); + account_maps.push(val); + + // 2nd key - zero lamports, so will be removed + let key = Pubkey::new(&[12u8; 32]); + let hash = Hash::new(&[2u8; 32]); + let val = CalculateHashIntermediate::new( + 0, + hash, + ZERO_RAW_LAMPORTS_SENTINEL, + Slot::default(), + key, + ); + account_maps.push(val); + + let result = AccountsHash::rest_of_hash_calculation( + vec![vec![account_maps.clone()]], + &mut HashStats::default(), + ); + let expected_hash = Hash::from_str("8j9ARGFv4W2GfML7d3sVJK2MePwrikqYnu6yqer28cCa").unwrap(); + assert_eq!((result.0, result.1), (expected_hash, 88)); + + // 3rd key - with pubkey value before 1st key so it will be sorted first + let key = Pubkey::new(&[10u8; 32]); + let hash = Hash::new(&[2u8; 32]); + let val = CalculateHashIntermediate::new(0, hash, 20, Slot::default(), key); + account_maps.push(val); + + let result = AccountsHash::rest_of_hash_calculation( + vec![vec![account_maps.clone()]], + &mut HashStats::default(), + ); + let expected_hash = Hash::from_str("EHv9C5vX7xQjjMpsJMzudnDTzoTSRwYkqLzY8tVMihGj").unwrap(); + assert_eq!((result.0, result.1), (expected_hash, 108)); + + // 3rd key - with later slot + let key = Pubkey::new(&[10u8; 32]); + let hash = Hash::new(&[99u8; 32]); + let val = CalculateHashIntermediate::new(0, hash, 30, Slot::default() + 1, key); + account_maps.push(val); + + let result = AccountsHash::rest_of_hash_calculation( + vec![vec![account_maps]], + &mut HashStats::default(), + ); + let expected_hash = Hash::from_str("7NNPg5A8Xsg1uv4UFm6KZNwsipyyUnmgCrznP6MBWoBZ").unwrap(); + assert_eq!((result.0, result.1), (expected_hash, 118)); + } + + 
#[test] + fn test_accountsdb_de_dup_accounts_zero_chunks() { + let (hashes, lamports) = + AccountsHash::de_dup_accounts_in_parallel(&[CalculateHashIntermediate::default()], 0); + assert_eq!(vec![vec![Hash::default()]], hashes); + assert_eq!(lamports, 0); + } + + #[test] + fn test_accountsdb_de_dup_accounts_empty() { + solana_logger::setup(); + + let (hashes, lamports) = AccountsHash::de_dup_and_eliminate_zeros( + vec![vec![], vec![]], + &mut HashStats::default(), + ); + assert_eq!( + vec![vec![Hash::default(); 0], vec![]], + hashes.into_iter().flatten().collect::>() + ); + assert_eq!(lamports, 0); + + let (hashes, lamports) = + AccountsHash::de_dup_and_eliminate_zeros(vec![], &mut HashStats::default()); + let empty: Vec>> = Vec::default(); + assert_eq!(empty, hashes); + assert_eq!(lamports, 0); + + let (hashes, lamports) = AccountsHash::de_dup_accounts_in_parallel(&[], 1); + assert_eq!( + vec![Hash::default(); 0], + hashes.into_iter().flatten().collect::>() + ); + assert_eq!(lamports, 0); + + let (hashes, lamports) = AccountsHash::de_dup_accounts_in_parallel(&[], 2); + assert_eq!( + vec![Hash::default(); 0], + hashes.into_iter().flatten().collect::>() + ); + assert_eq!(lamports, 0); + } + + #[test] + fn test_accountsdb_de_dup_accounts_from_stores() { + solana_logger::setup(); + + let key_a = Pubkey::new(&[1u8; 32]); + let key_b = Pubkey::new(&[2u8; 32]); + let key_c = Pubkey::new(&[3u8; 32]); + const COUNT: usize = 6; + const VERSION: u64 = 0; + let hashes: Vec<_> = (0..COUNT) + .into_iter() + .map(|i| Hash::new(&[i as u8; 32])) + .collect(); + // create this vector + // abbbcc + let keys = [key_a, key_b, key_b, key_b, key_c, key_c]; + + let accounts: Vec<_> = hashes + .into_iter() + .zip(keys.iter()) + .enumerate() + .map(|(i, (hash, key))| { + CalculateHashIntermediate::new( + VERSION, + hash, + (i + 1) as u64, + u64::MAX - i as u64, + *key, + ) + }) + .collect(); + + type ExpectedType = (String, bool, u64, String); + let expected:Vec = vec![ + // ("key/lamports 
key2/lamports ...", + // is_first_slice + // result lamports + // result hashes) + // "a5" = key_a, 5 lamports + ("a1", false, 0, "[]"), + ("a1b2", false, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3", false, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3b4", false, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3b4c5", false, 7, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b2", false, 0, "[]"), + ("b2b3", false, 0, "[]"), + ("b2b3b4", false, 0, "[]"), + ("b2b3b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b3", false, 0, "[]"), + ("b3b4", false, 0, "[]"), + ("b3b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b4", false, 0, "[]"), + ("b4c5", false, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("c5", false, 0, "[]"), + ("a1", true, 1, "[11111111111111111111111111111111]"), + ("a1b2", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3b4", true, 3, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("a1b2b3b4c5", true, 8, "[11111111111111111111111111111111, 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b2", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("b2b3", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("b2b3b4", true, 2, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi]"), + ("b2b3b4c5", true, 7, "[4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b3", true, 3, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR]"), + ("b3b4", true, 3, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR]"), + ("b3b4c5", true, 8, "[8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR, 
GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("b4", true, 4, "[CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8]"), + ("b4c5", true, 9, "[CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8, GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ("c5", true, 5, "[GgBaCs3NCBuZN12kCJgAW63ydqohFkHEdfdEXBPzLHq]"), + ].into_iter().map(|item| { + let result: ExpectedType = ( + item.0.to_string(), + item.1, + item.2, + item.3.to_string(), + ); + result + }).collect(); + + let mut expected_index = 0; + for first_slice in 0..2 { + for start in 0..COUNT { + for end in start + 1..COUNT { + let is_first_slice = first_slice == 1; + let accounts = accounts.clone(); + let slice = &accounts[start..end]; + + let result = AccountsHash::de_dup_accounts_from_stores(is_first_slice, slice); + let (hashes2, lamports2) = AccountsHash::de_dup_accounts_in_parallel(slice, 1); + let (hashes3, lamports3) = AccountsHash::de_dup_accounts_in_parallel(slice, 2); + let (hashes4, lamports4) = AccountsHash::de_dup_and_eliminate_zeros( + vec![slice.to_vec()], + &mut HashStats::default(), + ); + let (hashes5, lamports5) = AccountsHash::de_dup_and_eliminate_zeros( + vec![slice.to_vec(), slice.to_vec()], + &mut HashStats::default(), + ); + let (hashes6, lamports6) = AccountsHash::de_dup_and_eliminate_zeros( + vec![vec![], slice.to_vec()], + &mut HashStats::default(), + ); + + assert_eq!( + hashes2.iter().flatten().collect::>(), + hashes3.iter().flatten().collect::>() + ); + let expected2 = hashes2.clone().into_iter().flatten().collect::>(); + assert_eq!( + expected2, + hashes4 + .into_iter() + .flatten() + .into_iter() + .flatten() + .collect::>() + ); + assert_eq!( + vec![expected2.clone(), expected2.clone()], + hashes5.into_iter().flatten().collect::>() + ); + assert_eq!( + vec![vec![], expected2.clone()], + hashes6.into_iter().flatten().collect::>() + ); + assert_eq!(lamports2, lamports3); + assert_eq!(lamports2, lamports4); + assert_eq!(lamports2 * 2, lamports5); + assert_eq!(lamports2, lamports6); + + let 
hashes: Vec<_> = hashes2.into_iter().flatten().collect(); + + let human_readable = slice + .iter() + .map(|v| { + let mut s = (if v.pubkey == key_a { + "a" + } else if v.pubkey == key_b { + "b" + } else { + "c" + }) + .to_string(); + + s.push_str(&v.lamports.to_string()); + s + }) + .collect::(); + + let hash_result_as_string = format!("{:?}", result.0); + + let packaged_result: ExpectedType = ( + human_readable, + is_first_slice, + result.1 as u64, + hash_result_as_string, + ); + + if is_first_slice { + // the parallel version always starts with 'first slice' + assert_eq!( + result.0, hashes, + "description: {:?}, expected index: {}", + packaged_result, expected_index + ); + assert_eq!( + result.1 as u64, lamports2, + "description: {:?}, expected index: {}", + packaged_result, expected_index + ); + } + + assert_eq!(expected[expected_index], packaged_result); + + // for generating expected results + // error!("{:?},", packaged_result); + expected_index += 1; + } + } + } + + for first_slice in 0..2 { + let result = AccountsHash::de_dup_accounts_from_stores(first_slice == 1, &[]); + assert_eq!((vec![Hash::default(); 0], 0), result); + } + } + + #[test] + fn test_accountsdb_flatten_hashes_and_hash() { + solana_logger::setup(); + const COUNT: usize = 4; + let hashes: Vec<_> = (0..COUNT) + .into_iter() + .map(|i| Hash::new(&[(i) as u8; 32])) + .collect(); + let expected = + AccountsHash::compute_merkle_root_loop(hashes.clone(), MERKLE_FANOUT, |i| *i); + + assert_eq!( + AccountsHash::flatten_hashes_and_hash( + vec![vec![hashes.clone()]], + MERKLE_FANOUT, + &mut HashStats::default() + ), + expected, + ); + for in_first in 1..COUNT - 1 { + assert_eq!( + AccountsHash::flatten_hashes_and_hash( + vec![vec![ + hashes.clone()[0..in_first].to_vec(), + hashes.clone()[in_first..COUNT].to_vec() + ]], + MERKLE_FANOUT, + &mut HashStats::default() + ), + expected + ); + } + } + + #[test] + fn test_sort_hash_intermediate() { + solana_logger::setup(); + let mut stats = 
HashStats::default(); + let key = Pubkey::new_unique(); + let hash = Hash::new_unique(); + let val = CalculateHashIntermediate::new(1, hash, 1, 1, key); + + // slot same, version < + let hash2 = Hash::new_unique(); + let val2 = CalculateHashIntermediate::new(0, hash2, 4, 1, key); + let val3 = CalculateHashIntermediate::new(3, hash2, 4, 1, key); + let val4 = CalculateHashIntermediate::new(4, hash2, 4, 1, key); + + let src = vec![vec![val2.clone()], vec![val.clone()]]; + let result = AccountsHash::sort_hash_intermediate(src.clone(), &mut stats); + assert_eq!(result, src); + + let src = vec![ + vec![val2.clone(), val.clone()], + vec![val3.clone(), val4.clone()], + ]; + let sorted = vec![vec![val, val2], vec![val4, val3]]; + let result = AccountsHash::sort_hash_intermediate(src, &mut stats); + assert_eq!(result, sorted); + + let src = vec![vec![]]; + let result = AccountsHash::sort_hash_intermediate(src.clone(), &mut stats); + assert_eq!(result, src); + + let src = vec![]; + let result = AccountsHash::sort_hash_intermediate(src.clone(), &mut stats); + assert_eq!(result, src); + } + + #[test] + fn test_accountsdb_compare_two_hash_entries() { + solana_logger::setup(); + let key = Pubkey::new_unique(); + let hash = Hash::new_unique(); + let val = CalculateHashIntermediate::new(1, hash, 1, 1, key); + + // slot same, version < + let hash2 = Hash::new_unique(); + let val2 = CalculateHashIntermediate::new(0, hash2, 4, 1, key); + assert_eq!( + std::cmp::Ordering::Less, + AccountsHash::compare_two_hash_entries(&val, &val2) + ); + + let list = vec![val.clone(), val2.clone()]; + let mut list_bkup = list.clone(); + list_bkup.sort_by(AccountsHash::compare_two_hash_entries); + let list = AccountsHash::sort_hash_intermediate(vec![list], &mut HashStats::default()); + assert_eq!(list, vec![list_bkup]); + + let list = vec![val2, val.clone()]; // reverse args + let mut list_bkup = list.clone(); + list_bkup.sort_by(AccountsHash::compare_two_hash_entries); + let list = 
AccountsHash::sort_hash_intermediate(vec![list], &mut HashStats::default()); + assert_eq!(list, vec![list_bkup]); + + // slot same, vers = + let hash3 = Hash::new_unique(); + let val3 = CalculateHashIntermediate::new(1, hash3, 2, 1, key); + assert_eq!( + std::cmp::Ordering::Equal, + AccountsHash::compare_two_hash_entries(&val, &val3) + ); + + // slot same, vers > + let hash4 = Hash::new_unique(); + let val4 = CalculateHashIntermediate::new(2, hash4, 6, 1, key); + assert_eq!( + std::cmp::Ordering::Greater, + AccountsHash::compare_two_hash_entries(&val, &val4) + ); + + // slot >, version < + let hash5 = Hash::new_unique(); + let val5 = CalculateHashIntermediate::new(0, hash5, 8, 2, key); + assert_eq!( + std::cmp::Ordering::Greater, + AccountsHash::compare_two_hash_entries(&val, &val5) + ); + } + + #[test] + fn test_accountsdb_remove_zero_balance_accounts() { + solana_logger::setup(); + + let key = Pubkey::new_unique(); + let hash = Hash::new_unique(); + let mut account_maps: Vec = Vec::new(); + let val = CalculateHashIntermediate::new(0, hash, 1, Slot::default(), key); + account_maps.push(val.clone()); + + let result = AccountsHash::de_dup_accounts_from_stores(true, &account_maps[..]); + assert_eq!(result, (vec![val.hash], val.lamports as u128)); + + // zero original lamports, higher version + let val = CalculateHashIntermediate::new( + 1, + hash, + ZERO_RAW_LAMPORTS_SENTINEL, + Slot::default(), + key, + ); + account_maps.insert(0, val); // has to be before other entry since sort order matters + + let result = AccountsHash::de_dup_accounts_from_stores(true, &account_maps[..]); + assert_eq!(result, (vec![], 0)); + } + + #[test] + fn test_accountsdb_cumulative_offsets1_d() { + let input = vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]; + let cumulative = CumulativeOffsets::from_raw(&input); + + let src: Vec<_> = input.clone().into_iter().flatten().collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + 
assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors + + const DIMENSION: usize = 0; + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION], 0); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION], 2); + + assert_eq!(cumulative.cumulative_offsets[0].start_offset, 0); + assert_eq!(cumulative.cumulative_offsets[1].start_offset, 2); + + for start in 0..len { + let slice = cumulative.get_slice(&input, start); + let len = slice.len(); + assert!(len > 0); + assert_eq!(&src[start..(start + len)], slice); + } + + let input = vec![vec![], vec![0, 1], vec![], vec![2, 3, 4], vec![]]; + let cumulative = CumulativeOffsets::from_raw(&input); + + let src: Vec<_> = input.clone().into_iter().flatten().collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors + + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION], 1); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION], 3); + + assert_eq!(cumulative.cumulative_offsets[0].start_offset, 0); + assert_eq!(cumulative.cumulative_offsets[1].start_offset, 2); + + for start in 0..len { + let slice = cumulative.get_slice(&input, start); + let len = slice.len(); + assert!(len > 0); + assert_eq!(&src[start..(start + len)], slice); + } + + let input: Vec> = vec![vec![]]; + let cumulative = CumulativeOffsets::from_raw(&input); + + let src: Vec<_> = input.into_iter().flatten().collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + assert_eq!(cumulative.cumulative_offsets.len(), 0); // 2 non-empty vectors + } + + #[test] + fn test_accountsdb_cumulative_offsets2_d() { + let input = vec![vec![vec![0, 1], vec![], vec![2, 3, 4], vec![]]]; + let cumulative = CumulativeOffsets::from_raw_2d(&input); + + let src: Vec<_> = input + .clone() + .into_iter() + .flatten() + .into_iter() + .flatten() + .collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + 
assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors + + const DIMENSION_0: usize = 0; + const DIMENSION_1: usize = 1; + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_0], 0); + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_1], 0); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_0], 0); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_1], 2); + + assert_eq!(cumulative.cumulative_offsets[0].start_offset, 0); + assert_eq!(cumulative.cumulative_offsets[1].start_offset, 2); + + for start in 0..len { + let slice = cumulative.get_slice_2d(&input, start); + let len = slice.len(); + assert!(len > 0); + assert_eq!(&src[start..(start + len)], slice); + } + + let input = vec![vec![vec![], vec![0, 1], vec![], vec![2, 3, 4], vec![]]]; + let cumulative = CumulativeOffsets::from_raw_2d(&input); + + let src: Vec<_> = input + .clone() + .into_iter() + .flatten() + .into_iter() + .flatten() + .collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors + + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_0], 0); + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_1], 1); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_0], 0); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_1], 3); + + assert_eq!(cumulative.cumulative_offsets[0].start_offset, 0); + assert_eq!(cumulative.cumulative_offsets[1].start_offset, 2); + + for start in 0..len { + let slice = cumulative.get_slice_2d(&input, start); + let len = slice.len(); + assert!(len > 0); + assert_eq!(&src[start..(start + len)], slice); + } + + let input: Vec>> = vec![vec![]]; + let cumulative = CumulativeOffsets::from_raw_2d(&input); + + let src: Vec<_> = input.into_iter().flatten().collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + assert_eq!(cumulative.cumulative_offsets.len(), 0); // 2 
non-empty vectors + + let input = vec![ + vec![vec![0, 1]], + vec![vec![]], + vec![vec![], vec![2, 3, 4], vec![]], + ]; + let cumulative = CumulativeOffsets::from_raw_2d(&input); + + let src: Vec<_> = input + .clone() + .into_iter() + .flatten() + .into_iter() + .flatten() + .collect(); + let len = src.len(); + assert_eq!(cumulative.total_count, len); + assert_eq!(cumulative.cumulative_offsets.len(), 2); // 2 non-empty vectors + + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_0], 0); + assert_eq!(cumulative.cumulative_offsets[0].index[DIMENSION_1], 0); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_0], 2); + assert_eq!(cumulative.cumulative_offsets[1].index[DIMENSION_1], 1); + + assert_eq!(cumulative.cumulative_offsets[0].start_offset, 0); + assert_eq!(cumulative.cumulative_offsets[1].start_offset, 2); + + for start in 0..len { + let slice = cumulative.get_slice_2d(&input, start); + let len = slice.len(); + assert!(len > 0); + assert_eq!(&src[start..(start + len)], slice); + } + } + + #[test] + fn test_accountsdb_flatten_hash_intermediate() { + solana_logger::setup(); + let test = vec![vec![vec![CalculateHashIntermediate::new( + 1, + Hash::new_unique(), + 2, + 3, + Pubkey::new_unique(), + )]]]; + let mut stats = HashStats::default(); + let result = AccountsHash::flatten_hash_intermediate(test.clone(), &mut stats); + assert_eq!(result, test[0]); + assert_eq!(stats.unreduced_entries, 1); + + let mut stats = HashStats::default(); + let result = AccountsHash::flatten_hash_intermediate( + vec![vec![vec![CalculateHashIntermediate::default(); 0]]], + &mut stats, + ); + assert_eq!(result.iter().flatten().count(), 0); + assert_eq!(stats.unreduced_entries, 0); + + let test = vec![ + vec![vec![ + CalculateHashIntermediate::new(1, Hash::new_unique(), 2, 3, Pubkey::new_unique()), + CalculateHashIntermediate::new(8, Hash::new_unique(), 9, 10, Pubkey::new_unique()), + ]], + vec![vec![CalculateHashIntermediate::new( + 4, + Hash::new_unique(), + 5, + 6, 
+ Pubkey::new_unique(), + )]], + ]; + let mut stats = HashStats::default(); + let result = AccountsHash::flatten_hash_intermediate(test.clone(), &mut stats); + let expected = test + .into_iter() + .flatten() + .into_iter() + .flatten() + .collect::>(); + assert_eq!(result.into_iter().flatten().collect::>(), expected); + assert_eq!(stats.unreduced_entries, expected.len()); + } + + #[test] + fn test_accountsdb_flatten_hash_intermediate2() { + solana_logger::setup(); + // data is ordered: + // vec: just a level of hierarchy + // vec: 1 vec per PUBKEY_BINS_FOR_CALCULATING_HASHES + // vec: Intermediate data whose pubkey belongs in this division + let binned_data = vec![ + vec![vec![1, 2], vec![3, 4], vec![], vec![5]], + vec![vec![], vec![11, 12]], + ]; + let mut combined: Vec>> = vec![vec![]]; + binned_data.iter().enumerate().for_each(|(bin, v)| { + v.iter() + .enumerate() + .for_each(|(dimension0, v): (usize, &Vec)| { + while combined.len() <= dimension0 { + combined.push(vec![]); + } + let vec: &mut Vec> = &mut combined[dimension0]; + while vec.len() <= bin { + vec.push(vec![]); + } + vec[bin].extend(v.clone()); + }); + }); + + let mut stats = HashStats::default(); + let result = AccountsHash::flatten_hash_intermediate(combined, &mut stats); + assert_eq!( + result, + binned_data + .clone() + .into_iter() + .map(|x| x.into_iter().flatten().collect::>()) + .collect::>() + ); + assert_eq!( + stats.unreduced_entries, + binned_data + .into_iter() + .flatten() + .into_iter() + .flatten() + .count() + ); + + let src = vec![vec![vec![0]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + assert_eq!(result, vec![vec![0]]); + + let src = vec![vec![vec![0], vec![1]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + assert_eq!(result, vec![vec![0], vec![1]]); + + let src = vec![vec![vec![]], vec![vec![], vec![1]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + assert_eq!(result, vec![vec![], 
vec![1]]); + + let src: Vec>> = vec![vec![vec![], vec![]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + let expected: Vec> = vec![]; + assert_eq!(result, expected); + + let src: Vec>> = vec![vec![vec![], vec![]], vec![vec![], vec![]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + assert_eq!(result, expected); + + let src: Vec>> = vec![vec![vec![], vec![]], vec![vec![]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + assert_eq!(result, expected); + + let src: Vec>> = vec![vec![], vec![vec![]]]; + let result = AccountsHash::flatten_hash_intermediate(src, &mut stats); + let expected: Vec> = vec![]; + assert_eq!(result, expected); + } + + fn test_hashing_larger(hashes: Vec<(Pubkey, Hash)>, fanout: usize) -> Hash { + let result = AccountsHash::compute_merkle_root(hashes.clone(), fanout); + let reduced: Vec<_> = hashes.iter().map(|x| x.1).collect(); + let result2 = test_hashing(reduced, fanout); + assert_eq!(result, result2, "len: {}", hashes.len()); + result + } + + fn test_hashing(hashes: Vec, fanout: usize) -> Hash { + let temp: Vec<_> = hashes.iter().map(|h| (Pubkey::default(), *h)).collect(); + let result = AccountsHash::compute_merkle_root(temp, fanout); + let reduced: Vec<_> = hashes.clone(); + let result2 = + AccountsHash::compute_merkle_root_from_slices(hashes.len(), fanout, None, |start| { + &reduced[start..] + }); + assert_eq!(result, result2, "len: {}", hashes.len()); + + let result2 = + AccountsHash::compute_merkle_root_from_slices(hashes.len(), fanout, Some(1), |start| { + &reduced[start..] 
+ }); + assert_eq!(result, result2, "len: {}", hashes.len()); + + let reduced2: Vec<_> = hashes.iter().map(|x| vec![*x]).collect(); + let result2 = AccountsHash::flatten_hashes_and_hash( + vec![reduced2], + fanout, + &mut HashStats::default(), + ); + assert_eq!(result, result2, "len: {}", hashes.len()); + + let max = std::cmp::min(reduced.len(), fanout * 2); + for left in 0..max { + for right in left + 1..max { + let src = vec![ + vec![reduced[0..left].to_vec(), reduced[left..right].to_vec()], + vec![reduced[right..].to_vec()], + ]; + let result2 = + AccountsHash::flatten_hashes_and_hash(src, fanout, &mut HashStats::default()); + assert_eq!(result, result2); + } + } + result + } + + #[test] + fn test_accountsdb_compute_merkle_root_large() { + solana_logger::setup(); + + // handle fanout^x -1, +0, +1 for a few 'x's + const FANOUT: usize = 3; + let mut hash_counts: Vec<_> = (1..6) + .flat_map(|x| { + let mark = FANOUT.pow(x); + vec![mark - 1, mark, mark + 1] + }) + .collect(); + + // saturate the test space for threshold to threshold + target + // this hits right before we use the 3 deep optimization and all the way through all possible partial last chunks + let target = FANOUT.pow(3); + let threshold = target * FANOUT; + hash_counts.extend(threshold - 1..=threshold + target); + + for hash_count in hash_counts { + let hashes: Vec<_> = (0..hash_count) + .into_iter() + .map(|_| Hash::new_unique()) + .collect(); + + test_hashing(hashes, FANOUT); + } + } + + #[test] + fn test_accountsdb_compute_merkle_root() { + solana_logger::setup(); + + let expected_results = vec![ + (0, 0, "GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn", 0), + (0, 1, "8unXKJYTxrR423HgQxbDmx29mFri1QNrzVKKDxEfc6bj", 0), + (0, 2, "6QfkevXLLqbfAaR1kVjvMLFtEXvNUVrpmkwXqgsYtCFW", 1), + (0, 3, "G3FrJd9JrXcMiqChTSfvEdBL2sCPny3ebiUy9Xxbn7a2", 3), + (0, 4, "G3sZXHhwoCFuNyWy7Efffr47RBW33ibEp7b2hqNDmXdu", 6), + (0, 5, "78atJJYpokAPKMJwHxUW8SBDvPkkSpTBV7GiB27HwosJ", 10), + (0, 6, 
"7c9SM2BmCRVVXdrEdKcMK91MviPqXqQMd8QAb77tgLEy", 15), + (0, 7, "3hsmnZPhf22UvBLiZ4dVa21Qsdh65CCrtYXsb8MxoVAa", 21), + (0, 8, "5bwXUiC6RCRhb8fqvjvUXT6waU25str3UXA3a6Aq1jux", 28), + (0, 9, "3NNtQKH6PaYpCnFBtyi2icK9eYX3YM5pqA3SKaXtUNzu", 36), + (1, 0, "GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn", 0), + (1, 1, "4GWVCsnEu1iRyxjAB3F7J7C4MMvcoxFWtP9ihvwvDgxY", 0), + (1, 2, "8ML8Te6Uw2mipFr2v9sMZDcziXzhVqJo2qeMJohg1CJx", 1), + (1, 3, "AMEuC3AgqAeRBGBhSfTmuMdfbAiXJnGmKv99kHmcAE1H", 3), + (1, 4, "HEnDuJLHpsQfrApimGrovTqPEF6Vkrx2dKFr3BDtYzWx", 6), + (1, 5, "6rH69iP2yM1o565noZN1EqjySW4PhYUskz3c5tXePUfV", 10), + (1, 6, "7qEQMEXdfSPjbZ3q4cuuZwebDMvTvuaQ3dBiHoDUKo9a", 15), + (1, 7, "GDJz7LSKYjqqz6ujCaaQRJRmQ7TLNCwYJhdT84qT4qwk", 21), + (1, 8, "HT9krPLVTo3rr5WZQBQFrbqWs8SbYScXfnt8EVuobboM", 28), + (1, 9, "8y2pMgqMdRsvqw6BQXm6wtz3qxGPss72i6H6gVpPyeda", 36), + ]; + + let mut expected_index = 0; + let start = 0; + let default_fanout = 2; + // test 0..3 recursions (at fanout = 2) and 1 item remainder. The internals have 1 special case first loop and subsequent loops are the same types. 
+ let iterations = default_fanout * default_fanout * default_fanout + 2; + for pass in 0..2 { + let fanout = if pass == 0 { + default_fanout + } else { + MERKLE_FANOUT + }; + for count in start..iterations { + let mut input: Vec<_> = (0..count) + .map(|i| { + let key = Pubkey::new(&[(pass * iterations + count) as u8; 32]); + let hash = Hash::new(&[(pass * iterations + count + i + 1) as u8; 32]); + (key, hash) + }) + .collect(); + + let result = if pass == 0 { + test_hashing_larger(input.clone(), fanout) + } else { + // this sorts inside + let early_result = AccountsHash::accumulate_account_hashes( + input.iter().map(|i| (i.0, i.1)).collect::>(), + ); + AccountsHash::sort_hashes_by_pubkey(&mut input); + let result = AccountsHash::compute_merkle_root(input.clone(), fanout); + assert_eq!(early_result, result); + result + }; + // compare against captured, expected results for hash (and lamports) + assert_eq!( + ( + pass, + count, + &*(result.to_string()), + expected_results[expected_index].3 + ), // we no longer calculate lamports + expected_results[expected_index] + ); + expected_index += 1; + } + } + } + + #[test] + #[should_panic(expected = "overflow is detected while summing capitalization")] + fn test_accountsdb_lamport_overflow() { + solana_logger::setup(); + + let offset = 2; + let input = vec![ + CalculateHashIntermediate::new( + 0, + Hash::new_unique(), + u64::MAX - offset, + 0, + Pubkey::new_unique(), + ), + CalculateHashIntermediate::new( + 0, + Hash::new_unique(), + offset + 1, + 0, + Pubkey::new_unique(), + ), + ]; + AccountsHash::de_dup_accounts_in_parallel(&input, 1); + } + + #[test] + #[should_panic(expected = "overflow is detected while summing capitalization")] + fn test_accountsdb_lamport_overflow2() { + solana_logger::setup(); + + let offset = 2; + let input = vec![ + vec![CalculateHashIntermediate::new( + 0, + Hash::new_unique(), + u64::MAX - offset, + 0, + Pubkey::new_unique(), + )], + vec![CalculateHashIntermediate::new( + 0, + 
Hash::new_unique(), + offset + 1, + 0, + Pubkey::new_unique(), + )], + ]; + AccountsHash::de_dup_and_eliminate_zeros(input, &mut HashStats::default()); + } +} diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 22d6692c67..ad8057f8f6 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -1,3 +1,15 @@ +use crate::{ + contains::Contains, + inline_spl_token_v2_0::{self, SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, + secondary_index::*, +}; +use dashmap::DashSet; +use ouroboros::self_referencing; +use solana_measure::measure::Measure; +use solana_sdk::{ + clock::Slot, + pubkey::{Pubkey, PUBKEY_BYTES}, +}; use std::{ collections::{ btree_map::{self, BTreeMap}, @@ -14,25 +26,7 @@ use std::{ Arc, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }; - -use dashmap::DashSet; -use log::*; -use ouroboros::self_referencing; - -use { - solana_measure::measure::Measure, - solana_sdk::{ - clock::Slot, - pubkey::{Pubkey, PUBKEY_BYTES}, - }, - velas_account_program::{VAccountInfo, VAccountStorage, VelasAccountType}, -}; - -use crate::{ - contains::Contains, - inline_spl_token_v2_0::{self, SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, - secondary_index::*, -}; +use velas_account_program::{VAccountInfo, VAccountStorage, VelasAccountType}; pub const ITER_BATCH_SIZE: usize = 1000; @@ -86,6 +80,33 @@ pub enum AccountIndex { VelasAccountOperational, } +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct AccountSecondaryIndexesIncludeExclude { + pub exclude: bool, + pub keys: HashSet, +} + +#[derive(Debug, Default, Clone)] +pub struct AccountSecondaryIndexes { + pub keys: Option, + pub indexes: HashSet, +} + +impl AccountSecondaryIndexes { + pub fn is_empty(&self) -> bool { + self.indexes.is_empty() + } + pub fn contains(&self, index: &AccountIndex) -> bool { + self.indexes.contains(index) + } + pub fn include_key(&self, key: &Pubkey) -> bool { + match &self.keys { + Some(options) => options.exclude ^ 
options.keys.contains(key), + None => true, // include all keys + } + } +} + #[derive(Debug)] pub struct AccountMapEntryInner { ref_count: AtomicU64, @@ -270,18 +291,52 @@ pub trait ZeroLamport { fn is_zero_lamport(&self) -> bool; } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct AccountsIndex { pub account_maps: RwLock>>, program_id_index: SecondaryIndex, spl_token_mint_index: SecondaryIndex, spl_token_owner_index: SecondaryIndex, - velas_account_storage_index: SecondaryIndex, - velas_account_owner_index: SecondaryIndex, - velas_account_operational_index: SecondaryIndex, roots_tracker: RwLock, ongoing_scan_roots: RwLock>, zero_lamport_pubkeys: DashSet, + // EVM Indices + velas_account_storage_index: SecondaryIndex, + velas_account_owner_index: SecondaryIndex, + velas_account_operational_index: SecondaryIndex, + // TODO: Velas Relying Owner +} + +impl Default for AccountsIndex { + fn default() -> Self { + Self { + account_maps: RwLock::>>::default(), + program_id_index: SecondaryIndex::::new( + "program_id_index_stats", + ), + spl_token_mint_index: SecondaryIndex::::new( + "spl_token_mint_index_stats", + ), + spl_token_owner_index: SecondaryIndex::::new( + "spl_token_owner_index_stats", + ), + roots_tracker: RwLock::::default(), + ongoing_scan_roots: RwLock::>::default(), + zero_lamport_pubkeys: DashSet::::default(), + // + // EVM Indices + velas_account_storage_index: SecondaryIndex::::new( + "velas_account_storage_index", + ), + velas_account_owner_index: SecondaryIndex::::new( + "velas_account_owner_index", + ), + velas_account_operational_index: SecondaryIndex::::new( + "velas_account_operational_index", + ), + // TODO: Velas Relying Owner + } + } } impl AccountsIndex { @@ -323,7 +378,7 @@ impl AccountsIndex { // First we show that for any bank `B` that is a descendant of // the current `max_root`, it must be true that and `B.ancestors.contains(max_root)`, - // regardless of the pattern of `squash()` behavior, `where` `ancestors` is the set + // 
regardless of the pattern of `squash()` behavior, where `ancestors` is the set // of ancestors that is tracked in each bank. // // Proof: At startup, if starting from a snapshot, generate_index() adds all banks @@ -355,7 +410,7 @@ impl AccountsIndex { // BankForks before the `set_root`. // // This means by the guarantees of `R_descendants` described above, because - // `R_new` is an ancestor of `B`, and `R < R_new < B`, then B.ancestors.contains(R_new)`. + // `R_new` is an ancestor of `B`, and `R < R_new < B`, then `B.ancestors.contains(R_new)`. // // Now until the next `set_root`, any new banks constructed from `new_from_parent` will // also have `max_root == R_new` in their ancestor set, so the claim holds for those descendants @@ -476,7 +531,6 @@ impl AccountsIndex { &va_storage_key, Some(max_root), ), - ScanTypes::Indexed(IndexKey::VelasAccountOwner(va_owner_key)) => self .do_scan_secondary_index( ancestors, @@ -591,7 +645,7 @@ impl AccountsIndex { F: FnMut(&Pubkey, (&T, Slot)), { for pubkey in index.get(index_key) { - // Maybe these reads from the AccountsIndex can be batched everytime it + // Maybe these reads from the AccountsIndex can be batched every time it // grabs the read lock as well... 
if let Some((list_r, index)) = self.get(&pubkey, Some(ancestors), max_root) { func( @@ -650,7 +704,11 @@ impl AccountsIndex { (w_account_entry.unwrap(), is_newly_inserted) } - pub fn handle_dead_keys(&self, dead_keys: &[&Pubkey], account_indexes: &HashSet) { + pub fn handle_dead_keys( + &self, + dead_keys: &[&Pubkey], + account_indexes: &AccountSecondaryIndexes, + ) { if !dead_keys.is_empty() { for key in dead_keys.iter() { let mut w_index = self.account_maps.write().unwrap(); @@ -658,15 +716,10 @@ impl AccountsIndex { if index_entry.get().slot_list.read().unwrap().is_empty() { index_entry.remove(); - // Note passing `None` to remove all the entries for this key - // is only safe because we have the lock for this key's entry - // in the AccountsIndex, so no other thread is also updating - // the index - self.purge_secondary_indexes_by_inner_key( - key, - None::<&Slot>, - account_indexes, - ); + // Note it's only safe to remove all the entries for this key + // because we have the lock for this key's entry in the AccountsIndex, + // so no other thread is also updating the index + self.purge_secondary_indexes_by_inner_key(key, account_indexes); } } } @@ -752,26 +805,23 @@ impl AccountsIndex { pubkey: &Pubkey, slots_to_purge: &'a C, reclaims: &mut SlotList, - account_indexes: &HashSet, ) -> bool where C: Contains<'a, Slot>, { - let res = { - let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap(); - write_account_map_entry.slot_list_mut(|slot_list| { - slot_list.retain(|(slot, item)| { - let should_purge = slots_to_purge.contains(&slot); - if should_purge { - reclaims.push((*slot, item.clone())); - } - !should_purge - }); - slot_list.is_empty() - }) - }; - self.purge_secondary_indexes_by_inner_key(pubkey, Some(slots_to_purge), account_indexes); - res + let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap(); + write_account_map_entry.slot_list_mut(|slot_list| { + slot_list.retain(|(slot, item)| { + let should_purge = 
slots_to_purge.contains(&slot); + if should_purge { + reclaims.push((*slot, item.clone())); + false + } else { + true + } + }); + slot_list.is_empty() + }) } pub fn min_ongoing_scan_root(&self) -> Option { @@ -858,20 +908,21 @@ impl AccountsIndex { fn update_secondary_indexes( &self, pubkey: &Pubkey, - slot: Slot, account_owner: &Pubkey, account_data: &[u8], - account_indexes: &HashSet, + account_indexes: &AccountSecondaryIndexes, ) { if account_indexes.is_empty() { return; } - if account_indexes.contains(&AccountIndex::ProgramId) { - self.program_id_index.insert(account_owner, pubkey, slot); + if account_indexes.contains(&AccountIndex::ProgramId) + && account_indexes.include_key(account_owner) + { + self.program_id_index.insert(account_owner, pubkey); } // Note because of the below check below on the account data length, when an - // account hits zero lamports and is reset to Account::Default, then we skip + // account hits zero lamports and is reset to AccountSharedData::Default, then we skip // the below updates to the secondary indexes. // // Skipping means not updating secondary index to mark the account as missing. @@ -880,7 +931,7 @@ impl AccountsIndex { // removed from the secondary index, the scan function will: // 1) consult the primary index via `get(&pubkey, Some(ancestors), max_root)` // and find the zero-lamport version - // 2) When the fetch from storage occurs, it will return Account::Default + // 2) When the fetch from storage occurs, it will return AccountSharedData::Default // (as persisted tombstone for snapshots). This will then ultimately be // filtered out by post-scan filters, like in `get_filtered_spl_token_accounts_by_owner()`. 
if *account_owner == inline_spl_token_v2_0::id() @@ -891,7 +942,9 @@ impl AccountsIndex { &account_data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET ..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES], ); - self.spl_token_owner_index.insert(&owner_key, pubkey, slot); + if account_indexes.include_key(&owner_key) { + self.spl_token_owner_index.insert(&owner_key, pubkey); + } } if account_indexes.contains(&AccountIndex::SplTokenMint) { @@ -899,7 +952,9 @@ impl AccountsIndex { &account_data[SPL_TOKEN_ACCOUNT_MINT_OFFSET ..SPL_TOKEN_ACCOUNT_MINT_OFFSET + PUBKEY_BYTES], ); - self.spl_token_mint_index.insert(&mint_key, pubkey, slot); + if account_indexes.include_key(&mint_key) { + self.spl_token_mint_index.insert(&mint_key, pubkey); + } } } @@ -907,8 +962,7 @@ impl AccountsIndex { match VelasAccountType::try_from(account_data) { Ok(VelasAccountType::Account(VAccountInfo { ref storage, .. })) => { if account_indexes.contains(&AccountIndex::VelasAccountStorage) { - self.velas_account_storage_index - .insert(storage, pubkey, slot); + self.velas_account_storage_index.insert(storage, pubkey); } } Ok(VelasAccountType::Storage(VAccountStorage { @@ -917,20 +971,17 @@ impl AccountsIndex { })) => { if account_indexes.contains(&AccountIndex::VelasAccountOwner) { for owner in owners { - self.velas_account_owner_index.insert(&owner, pubkey, slot); + self.velas_account_owner_index.insert(&owner, pubkey); } } if account_indexes.contains(&AccountIndex::VelasAccountOperational) { for operational in operationals { - self.velas_account_operational_index.insert( - &operational.pubkey, - pubkey, - slot, - ); + self.velas_account_operational_index + .insert(&operational.pubkey, pubkey); } } } - Err(err) => warn!("Unable to parse Velas Account: {:?}", err), + Err(err) => log::warn!("Unable to parse Velas Account: {:?}", err), } } } @@ -944,7 +995,7 @@ impl AccountsIndex { pubkey: &Pubkey, account_owner: &Pubkey, account_data: &[u8], - account_indexes: &HashSet, + account_indexes: &AccountSecondaryIndexes, 
account_info: T, reclaims: &mut SlotList, ) { @@ -955,7 +1006,7 @@ impl AccountsIndex { } w_account_entry.update(slot, account_info, reclaims); } - self.update_secondary_indexes(pubkey, slot, account_owner, account_data, account_indexes); + self.update_secondary_indexes(pubkey, account_owner, account_data, account_indexes); } // Updates the given pubkey at the given slot with the new account information. @@ -967,7 +1018,7 @@ impl AccountsIndex { pubkey: &Pubkey, account_owner: &Pubkey, account_data: &[u8], - account_indexes: &HashSet, + account_indexes: &AccountSecondaryIndexes, account_info: T, reclaims: &mut SlotList, ) -> bool { @@ -991,7 +1042,7 @@ impl AccountsIndex { w_account_entry.update(slot, account_info, reclaims); is_newly_inserted }; - self.update_secondary_indexes(pubkey, slot, account_owner, account_data, account_indexes); + self.update_secondary_indexes(pubkey, account_owner, account_data, account_indexes); is_newly_inserted } @@ -1017,40 +1068,32 @@ impl AccountsIndex { } } - fn purge_secondary_indexes_by_inner_key<'a, C>( + fn purge_secondary_indexes_by_inner_key<'a>( &'a self, inner_key: &Pubkey, - slots_to_remove: Option<&'a C>, - account_indexes: &HashSet, - ) where - C: Contains<'a, Slot>, - { + account_indexes: &AccountSecondaryIndexes, + ) { if account_indexes.contains(&AccountIndex::ProgramId) { - self.program_id_index - .remove_by_inner_key(inner_key, slots_to_remove); + self.program_id_index.remove_by_inner_key(inner_key); } if account_indexes.contains(&AccountIndex::SplTokenOwner) { - self.spl_token_owner_index - .remove_by_inner_key(inner_key, slots_to_remove); + self.spl_token_owner_index.remove_by_inner_key(inner_key); } if account_indexes.contains(&AccountIndex::SplTokenMint) { - self.spl_token_mint_index - .remove_by_inner_key(inner_key, slots_to_remove); + self.spl_token_mint_index.remove_by_inner_key(inner_key); } } fn purge_older_root_entries( &self, - pubkey: &Pubkey, list: &mut SlotList, reclaims: &mut SlotList, max_clean_root: 
Option, - account_indexes: &HashSet, ) { - let roots_traker = &self.roots_tracker.read().unwrap(); - let max_root = Self::get_max_root(&roots_traker.roots, &list, max_clean_root); + let roots_tracker = &self.roots_tracker.read().unwrap(); + let max_root = Self::get_max_root(&roots_tracker.roots, &list, max_clean_root); let mut purged_slots: HashSet = HashSet::new(); list.retain(|(slot, value)| { @@ -1061,8 +1104,6 @@ impl AccountsIndex { } !should_purge }); - - self.purge_secondary_indexes_by_inner_key(pubkey, Some(&purged_slots), account_indexes); } // `is_cached` closure is needed to work around the generic (`T`) indexed type. @@ -1071,17 +1112,10 @@ impl AccountsIndex { pubkey: &Pubkey, reclaims: &mut SlotList, max_clean_root: Option, - account_indexes: &HashSet, ) { if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) { locked_entry.slot_list_mut(|slot_list| { - self.purge_older_root_entries( - pubkey, - slot_list, - reclaims, - max_clean_root, - account_indexes, - ); + self.purge_older_root_entries(slot_list, reclaims, max_clean_root); }); } } @@ -1232,19 +1266,25 @@ pub mod tests { DashMap(&'a SecondaryIndex), } - pub fn spl_token_mint_index_enabled() -> HashSet { + pub fn spl_token_mint_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenMint); - account_indexes + AccountSecondaryIndexes { + indexes: account_indexes, + keys: None, + } } - pub fn spl_token_owner_index_enabled() -> HashSet { + pub fn spl_token_owner_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenOwner); - account_indexes + AccountSecondaryIndexes { + indexes: account_indexes, + keys: None, + } } - fn create_dashmap_secondary_index_state() -> (usize, usize, HashSet) { + fn create_dashmap_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let 
index = AccountsIndex::::default(); @@ -1254,7 +1294,7 @@ pub mod tests { (0, PUBKEY_BYTES, spl_token_mint_index_enabled()) } - fn create_rwlock_secondary_index_state() -> (usize, usize, HashSet) { + fn create_rwlock_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let index = AccountsIndex::::default(); @@ -1281,6 +1321,51 @@ pub mod tests { assert_eq!(num, 0); } + #[test] + fn test_secondary_index_include_exclude() { + let pk1 = Pubkey::new_unique(); + let pk2 = Pubkey::new_unique(); + let mut index = AccountSecondaryIndexes::default(); + + assert!(!index.contains(&AccountIndex::ProgramId)); + index.indexes.insert(AccountIndex::ProgramId); + assert!(index.contains(&AccountIndex::ProgramId)); + assert!(index.include_key(&pk1)); + assert!(index.include_key(&pk2)); + + let exclude = false; + index.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [pk1].iter().cloned().collect::>(), + exclude, + }); + assert!(index.include_key(&pk1)); + assert!(!index.include_key(&pk2)); + + let exclude = true; + index.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [pk1].iter().cloned().collect::>(), + exclude, + }); + assert!(!index.include_key(&pk1)); + assert!(index.include_key(&pk2)); + + let exclude = true; + index.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [pk1, pk2].iter().cloned().collect::>(), + exclude, + }); + assert!(!index.include_key(&pk1)); + assert!(!index.include_key(&pk2)); + + let exclude = false; + index.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [pk1, pk2].iter().cloned().collect::>(), + exclude, + }); + assert!(index.include_key(&pk1)); + assert!(index.include_key(&pk2)); + } + #[test] fn test_insert_no_ancestors() { let key = Keypair::new(); @@ -1291,7 +1376,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1316,7 +1401,7 @@ pub mod tests 
{ &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1340,7 +1425,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1373,7 +1458,7 @@ pub mod tests { &new_pubkey, &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut vec![], ); @@ -1389,7 +1474,7 @@ pub mod tests { &Pubkey::default(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut vec![], ); @@ -1520,7 +1605,7 @@ pub mod tests { &solana_sdk::pubkey::new_rand(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1545,7 +1630,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1659,7 +1744,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1674,7 +1759,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), false, &mut gc, ); @@ -1695,7 +1780,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1705,7 +1790,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), false, &mut gc, ); @@ -1727,7 +1812,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1737,7 +1822,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), false, &mut gc, ); @@ -1746,7 +1831,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); 
@@ -1755,7 +1840,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1767,7 +1852,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), true, &mut gc, ); @@ -1801,7 +1886,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), 12, &mut gc )); @@ -1811,7 +1896,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), 10, &mut gc )); @@ -1828,7 +1913,7 @@ pub mod tests { &key.pubkey(), &Pubkey::default(), &[], - &HashSet::new(), + &AccountSecondaryIndexes::default(), 9, &mut gc )); @@ -1884,7 +1969,7 @@ pub mod tests { secondary_index: &SecondaryIndex, key_start: usize, key_end: usize, - account_index: &HashSet, + secondary_indexes: &AccountSecondaryIndexes, ) { // No roots, should be no reclaims let slots = vec![1, 2, 5, 9]; @@ -1892,7 +1977,7 @@ pub mod tests { let account_key = Pubkey::new_unique(); let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; - account_data[key_start..key_end].clone_from_slice(&(index_key.clone().to_bytes())); + account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Insert slots into secondary index for slot in &slots { @@ -1902,7 +1987,7 @@ pub mod tests { // Make sure these accounts are added to secondary index &inline_spl_token_v2_0::id(), &account_data, - account_index, + secondary_indexes, true, &mut vec![], ); @@ -1922,43 +2007,43 @@ pub mod tests { .read() .unwrap() .len(), - slots.len() + 1 ); index.purge_exact( &account_key, &slots.into_iter().collect::>(), &mut vec![], - account_index, ); + index.handle_dead_keys(&[&account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_purge_exact_dashmap_secondary_index() 
{ - let (key_start, key_end, account_index) = create_dashmap_secondary_index_state(); + let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_mint_index, key_start, key_end, - &account_index, + &secondary_indexes, ); } #[test] fn test_purge_exact_rwlock_secondary_index() { - let (key_start, key_end, account_index) = create_rwlock_secondary_index_state(); + let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_owner_index, key_start, key_end, - &account_index, + &secondary_indexes, ); } @@ -1968,13 +2053,7 @@ pub mod tests { let index = AccountsIndex::::default(); let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; let mut reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - None, - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); @@ -1984,13 +2063,7 @@ pub mod tests { // Note 2 is not a root index.add_root(5, false); reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - None, - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); @@ -1998,13 +2071,7 @@ pub mod tests { slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; index.add_root(6, false); reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - None, - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, 
vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); @@ -2012,26 +2079,14 @@ pub mod tests { // outcome slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - Some(6), - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root, earlier slots should be reclaimed slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - Some(5), - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); @@ -2039,13 +2094,7 @@ pub mod tests { // so nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - Some(2), - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); @@ -2053,13 +2102,7 @@ pub mod tests { // so nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - Some(1), - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); @@ -2067,41 +2110,32 @@ pub mod tests { // some of the roots in the list, shouldn't return those smaller roots slot_list = vec![(1, true), (2, 
true), (5, true), (9, true)]; reclaims = vec![]; - index.purge_older_root_entries( - &Pubkey::default(), - &mut slot_list, - &mut reclaims, - Some(7), - &HashSet::new(), - ); + index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); } - fn check_secondary_index_unique( + fn check_secondary_index_mapping_correct( secondary_index: &SecondaryIndex, - slot: Slot, - key: &Pubkey, + secondary_index_keys: &[Pubkey], account_key: &Pubkey, ) where SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, { // Check secondary index has unique mapping from secondary index key // to the account key and slot - assert_eq!(secondary_index.index.len(), 1); - let inner_key_map = secondary_index.index.get(key).unwrap(); - assert_eq!(inner_key_map.len(), 1); - inner_key_map - .value() - .get(account_key, &|slots_map: Option<&RwLock>>| { - let slots_map = slots_map.unwrap(); - assert_eq!(slots_map.read().unwrap().len(), 1); - assert!(slots_map.read().unwrap().contains(&slot)); - }); - - // Check reverse index is unique - let slots_map = secondary_index.reverse_index.get(account_key).unwrap(); - assert_eq!(slots_map.value().read().unwrap().get(&slot).unwrap(), key); + for secondary_index_key in secondary_index_keys { + assert_eq!(secondary_index.index.len(), secondary_index_keys.len()); + let account_key_map = secondary_index.get(secondary_index_key); + assert_eq!(account_key_map.len(), 1); + assert_eq!(account_key_map, vec![*account_key]); + } + // Check reverse index contains all of the `secondary_index_keys` + let secondary_index_key_map = secondary_index.reverse_index.get(account_key).unwrap(); + assert_eq!( + &*secondary_index_key_map.value().read().unwrap(), + secondary_index_keys + ); } fn run_test_secondary_indexes< @@ -2111,13 +2145,13 @@ pub mod tests { secondary_index: &SecondaryIndex, key_start: usize, key_end: usize, - account_index: &HashSet, + 
secondary_indexes: &AccountSecondaryIndexes, ) { + let mut secondary_indexes = secondary_indexes.clone(); let account_key = Pubkey::new_unique(); let index_key = Pubkey::new_unique(); - let slot = 1; let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; - account_data[key_start..key_end].clone_from_slice(&(index_key.clone().to_bytes())); + account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Wrong program id index.upsert( @@ -2125,7 +2159,7 @@ pub mod tests { &account_key, &Pubkey::default(), &account_data, - account_index, + &secondary_indexes, true, &mut vec![], ); @@ -2138,59 +2172,99 @@ pub mod tests { &account_key, &inline_spl_token_v2_0::id(), &account_data[1..], - account_index, + &secondary_indexes, true, &mut vec![], ); - assert!(index.spl_token_mint_index.index.is_empty()); - assert!(index.spl_token_mint_index.reverse_index.is_empty()); + assert!(secondary_index.index.is_empty()); + assert!(secondary_index.reverse_index.is_empty()); + + secondary_indexes.keys = None; // Just right. 
Inserting the same index multiple times should be ok for _ in 0..2 { index.update_secondary_indexes( &account_key, - slot, &inline_spl_token_v2_0::id(), &account_data, - account_index, + &secondary_indexes, ); - check_secondary_index_unique(secondary_index, slot, &index_key, &account_key); + check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); } + // included + assert!(!secondary_index.index.is_empty()); + assert!(!secondary_index.reverse_index.is_empty()); + + secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [index_key].iter().cloned().collect::>(), + exclude: false, + }); + secondary_index.index.clear(); + secondary_index.reverse_index.clear(); + index.update_secondary_indexes( + &account_key, + &inline_spl_token_v2_0::id(), + &account_data, + &secondary_indexes, + ); + assert!(!secondary_index.index.is_empty()); + assert!(!secondary_index.reverse_index.is_empty()); + check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); + + // not-excluded + secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { + keys: [].iter().cloned().collect::>(), + exclude: true, + }); + secondary_index.index.clear(); + secondary_index.reverse_index.clear(); + index.update_secondary_indexes( + &account_key, + &inline_spl_token_v2_0::id(), + &account_data, + &secondary_indexes, + ); + assert!(!secondary_index.index.is_empty()); + assert!(!secondary_index.reverse_index.is_empty()); + check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); + + secondary_indexes.keys = None; + index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| slot_list.clear()); // Everything should be deleted - index.handle_dead_keys(&[&account_key], account_index); - assert!(index.spl_token_mint_index.index.is_empty()); - assert!(index.spl_token_mint_index.reverse_index.is_empty()); + index.handle_dead_keys(&[&account_key], &secondary_indexes); + 
assert!(secondary_index.index.is_empty()); + assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_dashmap_secondary_index() { - let (key_start, key_end, account_index) = create_dashmap_secondary_index_state(); + let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::::default(); run_test_secondary_indexes( &index, &index.spl_token_mint_index, key_start, key_end, - &account_index, + &secondary_indexes, ); } #[test] fn test_rwlock_secondary_index() { - let (key_start, key_end, account_index) = create_rwlock_secondary_index_state(); + let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::::default(); run_test_secondary_indexes( &index, &index.spl_token_owner_index, key_start, key_end, - &account_index, + &secondary_indexes, ); } @@ -2201,7 +2275,7 @@ pub mod tests { secondary_index: &SecondaryIndex, index_key_start: usize, index_key_end: usize, - account_index: &HashSet, + secondary_indexes: &AccountSecondaryIndexes, ) { let account_key = Pubkey::new_unique(); let secondary_key1 = Pubkey::new_unique(); @@ -2209,10 +2283,10 @@ pub mod tests { let slot = 1; let mut account_data1 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data1[index_key_start..index_key_end] - .clone_from_slice(&(secondary_key1.clone().to_bytes())); + .clone_from_slice(&(secondary_key1.to_bytes())); let mut account_data2 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data2[index_key_start..index_key_end] - .clone_from_slice(&(secondary_key2.clone().to_bytes())); + .clone_from_slice(&(secondary_key2.to_bytes())); // First write one mint index index.upsert( @@ -2220,61 +2294,66 @@ pub mod tests { &account_key, &inline_spl_token_v2_0::id(), &account_data1, - account_index, + secondary_indexes, true, &mut vec![], ); - // Now write a different mint index + // Now write a different mint index for the same account 
index.upsert( slot, &account_key, &inline_spl_token_v2_0::id(), &account_data2, - account_index, + secondary_indexes, true, &mut vec![], ); - // Check correctness - check_secondary_index_unique(&secondary_index, slot, &secondary_key2, &account_key); - assert!(secondary_index.get(&secondary_key1).is_empty()); - assert_eq!(secondary_index.get(&secondary_key2), vec![account_key]); + // Both pubkeys will now be present in the index + check_secondary_index_mapping_correct( + &secondary_index, + &[secondary_key1, secondary_key2], + &account_key, + ); - // If another fork reintroduces secondary_key1, then it should be re-added to the - // index - let fork = slot + 1; + // If a later slot also introduces secondary_key1, then it should still exist in the index + let later_slot = slot + 1; index.upsert( - fork, + later_slot, &account_key, &inline_spl_token_v2_0::id(), &account_data1, - account_index, + secondary_indexes, true, &mut vec![], ); assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]); - // If we set a root at fork, and clean, then the secondary_key1 should no longer - // be findable - index.add_root(fork, false); + // If we set a root at `later_slot`, and clean, then even though the account with secondary_key1 + // was outdated by the update in the later slot, the primary account key is still alive, + // so both secondary keys will still be kept alive. 
+ index.add_root(later_slot, false); index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| { - index.purge_older_root_entries( - &account_key, - slot_list, - &mut vec![], - None, - account_index, - ) + index.purge_older_root_entries(slot_list, &mut vec![], None) }); - assert!(secondary_index.get(&secondary_key2).is_empty()); - assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]); - // Check correctness - check_secondary_index_unique(secondary_index, fork, &secondary_key1, &account_key); + check_secondary_index_mapping_correct( + secondary_index, + &[secondary_key1, secondary_key2], + &account_key, + ); + + // Removing the remaining entry for this pubkey in the index should mark the + // pubkey as dead and finally remove all the secondary indexes + let mut reclaims = vec![]; + index.purge_exact(&account_key, &later_slot, &mut reclaims); + index.handle_dead_keys(&[&account_key], secondary_indexes); + assert!(secondary_index.index.is_empty()); + assert!(secondary_index.reverse_index.is_empty()); } #[test] diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 02aade79cc..31b51f3eff 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -1,8 +1,11 @@ +//! Persistent storage for accounts. For more information, see: +//! https://docs.solana.com/implemented-proposals/persistent-account-storage + use log::*; use memmap2::MmapMut; use serde::{Deserialize, Serialize}; use solana_sdk::{ - account::Account, + account::{Account, AccountSharedData, ReadableAccount}, clock::{Epoch, Slot}, hash::Hash, pubkey::Pubkey, @@ -17,8 +20,8 @@ use std::{ sync::Mutex, }; -//Data placement should be aligned at the next boundary. Without alignment accessing the memory may -//crash on some architectures. +// Data placement should be aligned at the next boundary. Without alignment accessing the memory may +// crash on some architectures. const ALIGN_BOUNDARY_OFFSET: usize = mem::size_of::(); macro_rules! 
u64_align { ($addr: expr) => { @@ -54,8 +57,8 @@ pub struct AccountMeta { pub rent_epoch: Epoch, } -impl<'a> From<&'a Account> for AccountMeta { - fn from(account: &'a Account) -> Self { +impl<'a> From<&'a AccountSharedData> for AccountMeta { + fn from(account: &'a AccountSharedData) -> Self { Self { lamports: account.lamports, owner: account.owner, @@ -65,8 +68,8 @@ impl<'a> From<&'a Account> for AccountMeta { } } -/// References to Memory Mapped memory -/// The Account is stored separately from its data, so getting the actual account requires a clone +/// References to account data stored elsewhere. Getting an `Account` requires cloning +/// (see `StoredAccountMeta::clone_account()`). #[derive(PartialEq, Debug)] pub struct StoredAccountMeta<'a> { pub meta: &'a StoredMeta, @@ -79,14 +82,15 @@ pub struct StoredAccountMeta<'a> { } impl<'a> StoredAccountMeta<'a> { - pub fn clone_account(&self) -> Account { - Account { + /// Return a new Account by copying all the data referenced by the `StoredAccountMeta`. + pub fn clone_account(&self) -> AccountSharedData { + AccountSharedData::from(Account { lamports: self.account_meta.lamports, owner: self.account_meta.owner, executable: self.account_meta.executable, rent_epoch: self.account_meta.rent_epoch, data: self.data.to_vec(), - } + }) } fn sanitize(&self) -> bool { @@ -99,8 +103,8 @@ impl<'a> StoredAccountMeta<'a> { } fn sanitize_lamports(&self) -> bool { - // Sanitize 0 lamports to ensure to be same as Account::default() - self.account_meta.lamports != 0 || self.clone_account() == Account::default() + // Sanitize 0 lamports to ensure to be same as AccountSharedData::default() + self.account_meta.lamports != 0 || self.clone_account() == AccountSharedData::default() } fn ref_executable_byte(&self) -> &u8 { @@ -113,16 +117,28 @@ impl<'a> StoredAccountMeta<'a> { } } +/// A thread-safe, file-backed block of memory used to store `Account` instances. 
Append operations +/// are serialized such that only one thread updates the internal `append_lock` at a time. No +/// restrictions are placed on reading. That is, one may read items from one thread while another +/// is appending new items. #[derive(Debug, AbiExample)] -#[allow(clippy::mutex_atomic)] pub struct AppendVec { + /// The file path where the data is stored. path: PathBuf, + + /// A file-backed block of memory that is used to store the data for each appended item. map: MmapMut, - // This mutex forces append to be single threaded, but concurrent with reads - #[allow(clippy::mutex_atomic)] - append_offset: Mutex, + + /// A lock used to serialize append operations. + append_lock: Mutex<()>, + + /// The number of bytes used to store items, not the number of items. current_len: AtomicUsize, + + /// The number of bytes available for storing items. file_size: u64, + + /// True if the file should automatically be deleted when this AppendVec is dropped. remove_on_drop: bool, } @@ -132,7 +148,7 @@ impl Drop for AppendVec { if let Err(_e) = remove_file(&self.path) { // promote this to panic soon. // disabled due to many false positive warnings while running tests. - // blocked by rpc's updrade to jsonrpc v17 + // blocked by rpc's upgrade to jsonrpc v17 //error!("AppendVec failed to remove {:?}: {:?}", &self.path, e); } } @@ -140,7 +156,6 @@ impl Drop for AppendVec { } impl AppendVec { - #[allow(clippy::mutex_atomic)] pub fn new(file: &Path, create: bool, size: usize) -> Self { let initial_len = 0; AppendVec::sanitize_len_and_size(initial_len, size).unwrap(); @@ -165,10 +180,14 @@ impl AppendVec { }) .unwrap(); + // Theoretical performance optimization: write a zero to the end of + // the file so that we won't have to resize it later, which may be + // expensive. 
data.seek(SeekFrom::Start((size - 1) as u64)).unwrap(); data.write_all(&[0]).unwrap(); data.seek(SeekFrom::Start(0)).unwrap(); data.flush().unwrap(); + //UNSAFE: Required to create a Mmap let map = unsafe { MmapMut::map_mut(&data) }; let map = map.unwrap_or_else(|e| { @@ -185,7 +204,7 @@ impl AppendVec { map, // This mutex forces append to be single threaded, but concurrent with reads // See UNSAFE usage in `append_ptr` - append_offset: Mutex::new(initial_len), + append_lock: Mutex::new(()), current_len: AtomicUsize::new(initial_len), file_size: size as u64, remove_on_drop: true, @@ -196,7 +215,6 @@ impl AppendVec { self.remove_on_drop = false; } - #[allow(clippy::mutex_atomic)] pub fn new_empty_map(current_len: usize) -> Self { let map = MmapMut::map_anon(1).unwrap_or_else(|e| { error!( @@ -210,7 +228,7 @@ impl AppendVec { AppendVec { path: PathBuf::from(String::default()), map, - append_offset: Mutex::new(current_len), + append_lock: Mutex::new(()), current_len: AtomicUsize::new(current_len), file_size: 0, // will be filled by set_file() remove_on_drop: true, @@ -242,13 +260,11 @@ impl AppendVec { self.map.flush() } - #[allow(clippy::mutex_atomic)] pub fn reset(&self) { // This mutex forces append to be single threaded, but concurrent with reads // See UNSAFE usage in `append_ptr` - let mut offset = self.append_offset.lock().unwrap(); + let _lock = self.append_lock.lock().unwrap(); self.current_len.store(0, Ordering::Relaxed); - *offset = 0; } pub fn len(&self) -> usize { @@ -263,16 +279,10 @@ impl AppendVec { self.file_size } - // Get the file path relative to the top level accounts directory - pub fn get_relative_path>(append_vec_path: P) -> Option { - append_vec_path.as_ref().file_name().map(PathBuf::from) - } - - pub fn new_relative_path(slot: Slot, id: usize) -> PathBuf { - PathBuf::from(&format!("{}.{}", slot, id)) + pub fn file_name(slot: Slot, id: usize) -> String { + format!("{}.{}", slot, id) } - #[allow(clippy::mutex_atomic)] pub fn 
new_from_file>(path: P, current_len: usize) -> io::Result<(Self, usize)> { let data = OpenOptions::new() .read(true) @@ -288,7 +298,7 @@ impl AppendVec { let new = AppendVec { path: path.as_ref().to_path_buf(), map, - append_offset: Mutex::new(current_len), + append_lock: Mutex::new(()), current_len: AtomicUsize::new(current_len), file_size, remove_on_drop: true, @@ -326,6 +336,10 @@ impl AppendVec { (offset == aligned_current_len, num_accounts) } + /// Get a reference to the data at `offset` of `size` bytes if that slice + /// doesn't overrun the internal buffer. Otherwise return None. + /// Also return the offset of the first byte after the requested data that + /// falls on a 64-byte boundary. fn get_slice(&self, offset: usize, size: usize) -> Option<(&[u8], usize)> { let (next, overflow) = offset.overflowing_add(size); if overflow || next > self.len() { @@ -342,11 +356,13 @@ impl AppendVec { )) } + /// Copy `len` bytes from `src` to the first 64-byte boundary after position `offset` of + /// the internal buffer. Then update `offset` to the first byte after the copied data. fn append_ptr(&self, offset: &mut usize, src: *const u8, len: usize) { let pos = u64_align!(*offset); let data = &self.map[pos..(pos + len)]; //UNSAFE: This mut append is safe because only 1 thread can append at a time - //Mutex guarantees exclusive write access to the memory occupied in + //Mutex<()> guarantees exclusive write access to the memory occupied in //the range. unsafe { let dst = data.as_ptr() as *mut u8; @@ -355,6 +371,10 @@ impl AppendVec { *offset = pos + len; } + /// Copy each value in `vals`, in order, to the first 64-byte boundary after position `offset`. + /// If there is sufficient space, then update `offset` and the internal `current_len` to the + /// first byte after the copied data and return the starting position of the copied data. + /// Otherwise return None and leave `offset` unchanged. 
fn append_ptrs_locked(&self, offset: &mut usize, vals: &[(*const u8, usize)]) -> Option { let mut end = *offset; for val in vals { @@ -374,14 +394,20 @@ impl AppendVec { Some(pos) } + /// Return a reference to the type at `offset` if its data doesn't overrun the internal buffer. + /// Otherwise return None. Also return the offset of the first byte after the requested data + /// that falls on a 64-byte boundary. fn get_type<'a, T>(&self, offset: usize) -> Option<(&'a T, usize)> { let (data, next) = self.get_slice(offset, mem::size_of::())?; let ptr: *const T = data.as_ptr() as *const T; //UNSAFE: The cast is safe because the slice is aligned and fits into the memory - //and the lifetime of he &T is tied to self, which holds the underlying memory map + //and the lifetime of the &T is tied to self, which holds the underlying memory map Some((unsafe { &*ptr }, next)) } + /// Return account metadata for the account at `offset` if its data doesn't overrun + /// the internal buffer. Otherwise return None. Also return the offset of the first byte + /// after the requested data that falls on a 64-byte boundary. pub fn get_account<'a>(&'a self, offset: usize) -> Option<(StoredAccountMeta<'a>, usize)> { let (meta, next): (&'a StoredMeta, _) = self.get_type(offset)?; let (account_meta, next): (&'a AccountMeta, _) = self.get_type(next)?; @@ -400,7 +426,7 @@ impl AppendVec { next, )) } - pub fn get_account_test(&self, offset: usize) -> Option<(StoredMeta, Account)> { + pub fn get_account_test(&self, offset: usize) -> Option<(StoredMeta, AccountSharedData)> { let (stored_account, _) = self.get_account(offset)?; let meta = stored_account.meta.clone(); Some((meta, stored_account.clone_account())) @@ -410,29 +436,34 @@ impl AppendVec { self.path.clone() } - pub fn accounts(&self, mut start: usize) -> Vec { + /// Return account metadata for each account, starting from `offset`. 
+ pub fn accounts(&self, mut offset: usize) -> Vec { let mut accounts = vec![]; - while let Some((account, next)) = self.get_account(start) { + while let Some((account, next)) = self.get_account(offset) { accounts.push(account); - start = next; + offset = next; } accounts } - #[allow(clippy::mutex_atomic)] + /// Copy each account metadata, account and hash to the internal buffer. + /// Return the starting offset of each account metadata. + /// After each account is appended, the internal `current_len` is updated + /// and will be available to other threads. pub fn append_accounts( &self, - accounts: &[(StoredMeta, &Account)], + accounts: &[(StoredMeta, &AccountSharedData)], hashes: &[Hash], ) -> Vec { - let mut offset = self.append_offset.lock().unwrap(); + let _lock = self.append_lock.lock().unwrap(); + let mut offset = self.len(); let mut rv = Vec::with_capacity(accounts.len()); for ((stored_meta, account), hash) in accounts.iter().zip(hashes) { let meta_ptr = stored_meta as *const StoredMeta; let account_meta = AccountMeta::from(*account); let account_meta_ptr = &account_meta as *const AccountMeta; let data_len = stored_meta.data_len as usize; - let data_ptr = account.data.as_ptr(); + let data_ptr = account.data().as_ptr(); let hash_ptr = hash.as_ref().as_ptr(); let ptrs = [ (meta_ptr as *const u8, mem::size_of::()), @@ -449,15 +480,18 @@ impl AppendVec { // The last entry in this offset needs to be the u64 aligned offset, because that's // where the *next* entry will begin to be stored. - rv.push(u64_align!(*offset)); + rv.push(u64_align!(offset)); rv } + /// Copy the account metadata, account and hash to the internal buffer. + /// Return the starting offset of the account metadata. + /// After the account is appended, the internal `current_len` is updated. 
pub fn append_account( &self, storage_meta: StoredMeta, - account: &Account, + account: &AccountSharedData, hash: Hash, ) -> Option { let res = self.append_accounts(&[(storage_meta, account)], &[hash]); @@ -473,7 +507,7 @@ pub mod test_utils { use super::StoredMeta; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; - use solana_sdk::account::Account; + use solana_sdk::account::AccountSharedData; use solana_sdk::pubkey::Pubkey; use std::fs::create_dir_all; use std::path::PathBuf; @@ -504,10 +538,10 @@ pub mod test_utils { TempFile { path: buf } } - pub fn create_test_account(sample: usize) -> (StoredMeta, Account) { + pub fn create_test_account(sample: usize) -> (StoredMeta, AccountSharedData) { let data_len = sample % 256; - let mut account = Account::new(sample as u64, 0, &Pubkey::default()); - account.data = (0..data_len).map(|_| data_len as u8).collect(); + let mut account = AccountSharedData::new(sample as u64, 0, &Pubkey::default()); + account.set_data((0..data_len).map(|_| data_len as u8).collect()); let stored_meta = StoredMeta { write_version: 0, pubkey: Pubkey::default(), @@ -527,7 +561,7 @@ pub mod tests { use std::time::Instant; impl AppendVec { - fn append_account_test(&self, data: &(StoredMeta, Account)) -> Option { + fn append_account_test(&self, data: &(StoredMeta, AccountSharedData)) -> Option { self.append_account(data.0.clone(), &data.1, Hash::default()) } } @@ -681,16 +715,6 @@ pub mod tests { ); } - #[test] - fn test_relative_path() { - let relative_path = AppendVec::new_relative_path(0, 2); - let full_path = Path::new("/tmp").join(&relative_path); - assert_eq!( - relative_path, - AppendVec::get_relative_path(full_path).unwrap() - ); - } - #[test] fn test_new_from_file_crafted_zero_lamport_account() { let file = get_append_vec_path("test_append"); @@ -701,7 +725,7 @@ pub mod tests { let pubkey = solana_sdk::pubkey::new_rand(); let owner = Pubkey::default(); let data_len = 3_u64; - let mut account = Account::new(0, data_len as 
usize, &owner); + let mut account = AccountSharedData::new(0, data_len as usize, &owner); account.data = b"abc".to_vec(); let stored_meta = StoredMeta { write_version: 0, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f73c6b64ee..79c5b10935 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8,7 +8,7 @@ use crate::{ TransactionLoadResult, TransactionLoaders, }, accounts_db::{ErrorCounters, SnapshotStorages}, - accounts_index::{AccountIndex, Ancestors, IndexKey}, + accounts_index::{AccountSecondaryIndexes, Ancestors, IndexKey}, blockhash_queue::{BlockHashEvm, BlockhashQueue}, builtins::{self, ActivationType}, epoch_stakes::{EpochStakes, NodeVoteAccounts}, @@ -16,7 +16,7 @@ use crate::{ inline_spl_token_v2_0, instruction_recorder::InstructionRecorder, log_collector::LogCollector, - message_processor::{Executors, MessageProcessor}, + message_processor::{ExecuteDetailsTimings, Executors, MessageProcessor}, rent_collector::RentCollector, stakes::Stakes, status_cache::{SlotDelta, StatusCache}, @@ -34,8 +34,8 @@ use solana_measure::measure::Measure; use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_info}; use solana_sdk::{ account::{ - create_account_with_fields as create_account, from_account, Account, - InheritableAccountFields, + create_account_shared_data_with_fields as create_account, from_account, Account, + AccountSharedData, InheritableAccountFields, ReadableAccount, }, clock::{ Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, @@ -100,12 +100,34 @@ pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0; pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5; -#[derive(Default)] +#[derive(Clone, Debug, Default, PartialEq)] +pub struct RentDebits(pub Vec<(Pubkey, RewardInfo)>); + +impl RentDebits { + pub fn push(&mut self, account: &Pubkey, rent: u64, post_balance: u64) { + if rent != 0 { + let rent_debit = i64::try_from(rent).ok().and_then(|r| r.checked_neg()); + if let Some(rent_debit) 
= rent_debit { + let reward_info = RewardInfo { + reward_type: RewardType::Rent, + lamports: rent_debit, + post_balance, + }; + self.0.push((*account, reward_info)); + } else { + warn!("out of range rent debit from {}: {}", account, rent); + } + } + } +} + +#[derive(Default, Debug)] pub struct ExecuteTimings { pub check_us: u64, pub load_us: u64, pub execute_us: u64, pub store_us: u64, + pub details: ExecuteDetailsTimings, } impl ExecuteTimings { @@ -114,18 +136,19 @@ impl ExecuteTimings { self.load_us += other.load_us; self.execute_us += other.execute_us; self.store_us += other.store_us; + self.details.accumulate(&other.details); } } type BankStatusCache = StatusCache>; -#[frozen_abi(digest = "7WxTa4GdrBkjZCrHZ6xCpNA5b9LWMFnAt3fTyauYEurS")] +#[frozen_abi(digest = "HhY4tMP5KZU9fw9VLpMMUikfvNVCLksocZBUKjt8ZjYH")] pub type BankSlotDelta = SlotDelta>; -type TransactionAccountRefCells = Vec>>; -type TransactionAccountDepRefCells = Vec<(Pubkey, RefCell)>; -type TransactionLoaderRefCells = Vec)>>; +type TransactionAccountRefCells = Vec>>; +type TransactionAccountDepRefCells = Vec<(Pubkey, Rc>)>; +type TransactionLoaderRefCells = Vec>)>>; // Eager rent collection repeats in cyclic manner. -// Each cycle is composed of number of tiny pubkey subranges +// Each cycle is composed of number of tiny pubkey subranges // to scan, which is always multiple of the number of slots in epoch. 
type PartitionIndex = u64; type PartitionsPerCycle = u64; @@ -379,6 +402,7 @@ pub struct TransactionResults { pub fee_collection_results: Vec>, pub execution_results: Vec, pub overwritten_vote_accounts: Vec, + pub rent_debits: Vec, } pub struct TransactionBalancesSet { pub pre_balances: TransactionBalances, @@ -451,19 +475,19 @@ pub struct TransactionLogCollector { pub trait NonceRollbackInfo { fn nonce_address(&self) -> &Pubkey; - fn nonce_account(&self) -> &Account; + fn nonce_account(&self) -> &AccountSharedData; fn fee_calculator(&self) -> Option; - fn fee_account(&self) -> Option<&Account>; + fn fee_account(&self) -> Option<&AccountSharedData>; } #[derive(Clone, Debug, Default, PartialEq)] pub struct NonceRollbackPartial { nonce_address: Pubkey, - nonce_account: Account, + nonce_account: AccountSharedData, } impl NonceRollbackPartial { - pub fn new(nonce_address: Pubkey, nonce_account: Account) -> Self { + pub fn new(nonce_address: Pubkey, nonce_account: AccountSharedData) -> Self { Self { nonce_address, nonce_account, @@ -475,13 +499,13 @@ impl NonceRollbackInfo for NonceRollbackPartial { fn nonce_address(&self) -> &Pubkey { &self.nonce_address } - fn nonce_account(&self) -> &Account { + fn nonce_account(&self) -> &AccountSharedData { &self.nonce_account } fn fee_calculator(&self) -> Option { nonce_account::fee_calculator_of(&self.nonce_account) } - fn fee_account(&self) -> Option<&Account> { + fn fee_account(&self) -> Option<&AccountSharedData> { None } } @@ -489,16 +513,16 @@ impl NonceRollbackInfo for NonceRollbackPartial { #[derive(Clone, Debug, Default, PartialEq)] pub struct NonceRollbackFull { nonce_address: Pubkey, - nonce_account: Account, - fee_account: Option, + nonce_account: AccountSharedData, + fee_account: Option, } impl NonceRollbackFull { #[cfg(test)] pub fn new( nonce_address: Pubkey, - nonce_account: Account, - fee_account: Option, + nonce_account: AccountSharedData, + fee_account: Option, ) -> Self { Self { nonce_address, @@ -509,7 +533,7 
@@ impl NonceRollbackFull { pub fn from_partial( partial: NonceRollbackPartial, message: &Message, - accounts: &[Account], + accounts: &[AccountSharedData], ) -> Result { let NonceRollbackPartial { nonce_address, @@ -545,13 +569,13 @@ impl NonceRollbackInfo for NonceRollbackFull { fn nonce_address(&self) -> &Pubkey { &self.nonce_address } - fn nonce_account(&self) -> &Account { + fn nonce_account(&self) -> &AccountSharedData { &self.nonce_account } fn fee_calculator(&self) -> Option { nonce_account::fee_calculator_of(&self.nonce_account) } - fn fee_account(&self) -> Option<&Account> { + fn fee_account(&self) -> Option<&AccountSharedData> { self.fee_account.as_ref() } } @@ -919,7 +943,7 @@ impl Bank { &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ) } @@ -932,7 +956,7 @@ impl Bank { &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ); @@ -943,7 +967,7 @@ impl Bank { #[cfg(test)] pub(crate) fn new_with_config( genesis_config: &GenesisConfig, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, accounts_db_caching_enabled: bool, ) -> Self { Self::new_with_paths( @@ -966,7 +990,7 @@ impl Bank { frozen_account_pubkeys: &[Pubkey], debug_keys: Option>>, additional_builtins: Option<&Builtins>, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, accounts_db_caching_enabled: bool, ) -> Self { let mut bank = Self::default(); @@ -975,13 +999,6 @@ impl Bank { bank.cluster_type = Some(genesis_config.cluster_type); bank.evm_chain_id = genesis_config.evm_chain_id; - bank.rc.accounts = Arc::new(Accounts::new_with_config( - paths, - &genesis_config.cluster_type, - account_indexes, - accounts_db_caching_enabled, - )); - if let Some((evm_state_path, evm_genesis_path)) = evm_paths { let spv_compatibility = bank.fix_spv_proofs_evm(); @@ -996,6 +1013,13 @@ impl Bank { .unwrap(), ); } + + bank.rc.accounts = Arc::new(Accounts::new_with_config( + paths, + &genesis_config.cluster_type, + 
account_indexes, + accounts_db_caching_enabled, + )); bank.process_genesis_config(genesis_config); bank.finish_init(genesis_config, additional_builtins); @@ -1052,7 +1076,6 @@ impl Bank { &parent.rc.accounts, slot, parent.slot(), - epoch, )), parent: RwLock::new(Some(parent.clone())), slot, @@ -1428,7 +1451,7 @@ impl Bank { fn update_sysvar_account(&self, pubkey: &Pubkey, updater: F) where - F: Fn(&Option) -> Account, + F: Fn(&Option) -> AccountSharedData, { let old_account = self.get_sysvar_account(pubkey); let new_account = updater(&old_account); @@ -1438,7 +1461,7 @@ impl Bank { fn inherit_specially_retained_account_fields( &self, - old_account: &Option, + old_account: &Option, ) -> InheritableAccountFields { ( old_account.as_ref().map(|a| a.lamports).unwrap_or(1), @@ -1533,7 +1556,7 @@ impl Bank { self.update_sysvar_account(&sysvar::slot_history::id(), |account| { let mut slot_history = account .as_ref() - .map(|account| from_account::(&account).unwrap()) + .map(|account| from_account::(account).unwrap()) .unwrap_or_default(); slot_history.add(self.slot()); create_account( @@ -1547,7 +1570,7 @@ impl Bank { self.update_sysvar_account(&sysvar::slot_hashes::id(), |account| { let mut slot_hashes = account .as_ref() - .map(|account| from_account::(&account).unwrap()) + .map(|account| from_account::(account).unwrap()) .unwrap_or_default(); slot_hashes.add(self.parent_slot, self.parent_hash); create_account( @@ -1822,7 +1845,7 @@ impl Bank { fn stake_delegation_accounts( &self, reward_calc_tracer: &mut Option, - ) -> HashMap, Account)> { + ) -> HashMap, AccountSharedData)> { let mut accounts = HashMap::new(); self.stakes @@ -2157,7 +2180,6 @@ impl Bank { // record and commit are finished, those transactions will be // committed before this write lock can be obtained here. 
let mut hash = self.hash.write().unwrap(); - if *hash == Hash::default() { // finish up any deferred changes to account state self.collect_rent_eagerly(); @@ -2196,10 +2218,7 @@ impl Bank { shrink.stop(); info!( - "exhaustively_free_unused_resource() - flush: {}, - clean: {}, - shrink: {}", + "exhaustively_free_unused_resource() {} {} {}", flush, clean, shrink, ); } @@ -2248,6 +2267,10 @@ impl Bank { self.parent_slot } + pub fn parent_hash(&self) -> Hash { + self.parent_hash + } + fn process_genesis_config(&mut self, genesis_config: &GenesisConfig) { // Bootstrap validator collects fees until `new_from_parent` is called. self.fee_rate_governor = genesis_config.fee_rate_governor.clone(); @@ -2257,7 +2280,7 @@ impl Bank { if self.get_account(&pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } - self.store_account(pubkey, account); + self.store_account(pubkey, &AccountSharedData::from(account.clone())); self.capitalization.fetch_add(account.lamports, Relaxed); } // updating sysvars (the fees sysvar in this case) now depends on feature activations in @@ -2268,7 +2291,7 @@ impl Bank { if self.get_account(&pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } - self.store_account(pubkey, account); + self.store_account(pubkey, &AccountSharedData::from(account.clone())); } // highest staked node is the first collector @@ -2352,7 +2375,7 @@ impl Bank { name, program_id ), Some(account) => { - if *name == String::from_utf8_lossy(&account.data) { + if *name == String::from_utf8_lossy(&account.data()) { // nop; it seems that already AccountsDb is updated. 
return; } @@ -2435,6 +2458,7 @@ impl Bank { &self.fee_rate_governor } + // DEPRECATED pub fn get_blockhash_last_valid_slot(&self, blockhash: &Hash) -> Option { let blockhash_queue = self.blockhash_queue.read().unwrap(); // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue @@ -2444,6 +2468,15 @@ impl Bank { .map(|age| self.slot + blockhash_queue.len() as u64 - age) } + pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option { + let blockhash_queue = self.blockhash_queue.read().unwrap(); + // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue + // length is made variable by epoch + blockhash_queue + .get_hash_age(blockhash) + .map(|age| self.block_height + blockhash_queue.len() as u64 - age) + } + pub fn confirmed_last_blockhash(&self) -> (Hash, FeeCalculator) { const NUM_BLOCKHASH_CONFIRMATIONS: usize = 3; @@ -2537,15 +2570,20 @@ impl Bank { tick_height % self.ticks_per_slot == 0 } + pub fn demote_sysvar_write_locks(&self) -> bool { + self.feature_set + .is_active(&feature_set::demote_sysvar_write_locks::id()) + } + pub fn prepare_batch<'a, 'b>( &'a self, txs: impl Iterator, ) -> TransactionBatch<'a, 'b> { let hashed_txs: Vec = txs.map(HashedTransaction::from).collect(); - let lock_results = self - .rc - .accounts - .lock_accounts(hashed_txs.as_transactions_iter()); + let lock_results = self.rc.accounts.lock_accounts( + hashed_txs.as_transactions_iter(), + self.demote_sysvar_write_locks(), + ); TransactionBatch::new(lock_results, &self, Cow::Owned(hashed_txs)) } @@ -2553,27 +2591,25 @@ impl Bank { &'a self, hashed_txs: &'b [HashedTransaction], ) -> TransactionBatch<'a, 'b> { - let lock_results = self - .rc - .accounts - .lock_accounts(hashed_txs.as_transactions_iter()); + let lock_results = self.rc.accounts.lock_accounts( + hashed_txs.as_transactions_iter(), + self.demote_sysvar_write_locks(), + ); TransactionBatch::new(lock_results, &self, Cow::Borrowed(hashed_txs)) } 
- pub fn prepare_simulation_batch<'a, 'b>( + pub(crate) fn prepare_simulation_batch<'a, 'b>( &'a self, - txs: &'b [Transaction], + tx: &'b Transaction, ) -> TransactionBatch<'a, 'b> { - let lock_results: Vec<_> = txs - .iter() - .map(|tx| tx.sanitize().map_err(|e| e.into())) - .collect(); - let hashed_txs = txs.iter().map(HashedTransaction::from).collect(); - let mut batch = TransactionBatch::new(lock_results, &self, hashed_txs); + let mut batch = TransactionBatch::new( + vec![tx.sanitize().map_err(|e| e.into())], + &self, + Cow::Owned(vec![HashedTransaction::from(tx)]), + ); batch.needs_unlock = false; batch } - pub fn take_evm_state_cloned(&self) -> Option> { match &*self.evm_state.read().expect("bank evm state was poisoned") { evm_state::EvmState::Incomming(i) => Some(i.clone()), @@ -2606,15 +2642,16 @@ impl Bank { /// Run transactions against a frozen bank without committing the results pub fn simulate_transaction( &self, - transaction: Transaction, - ) -> (Result<()>, TransactionLogMessages) { + transaction: &Transaction, + ) -> (Result<()>, TransactionLogMessages, Vec) { assert!(self.is_frozen(), "simulation bank must be frozen"); - let txs = &[transaction]; - let batch = self.prepare_simulation_batch(txs); + let batch = self.prepare_simulation_batch(&transaction); + + let mut timings = ExecuteTimings::default(); let ( - _loaded_accounts, + loaded_accounts, executed, _inner_instructions, log_messages, @@ -2630,7 +2667,7 @@ impl Bank { MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, false, true, - &mut ExecuteTimings::default(), + &mut timings, Self::take_evm_state_form_simulation, ); @@ -2638,16 +2675,28 @@ impl Bank { let log_messages = log_messages .get(0) .map_or(vec![], |messages| messages.to_vec()); + let post_transaction_accounts = loaded_accounts + .into_iter() + .next() + .unwrap() + .0 + .ok() + .map(|loaded_transaction| loaded_transaction.accounts.into_iter().collect::>()) + .unwrap_or_default(); - (transaction_result, log_messages) + 
debug!("simulate_transaction: {:?}", timings); + + (transaction_result, log_messages, post_transaction_accounts) } pub fn unlock_accounts(&self, batch: &mut TransactionBatch) { if batch.needs_unlock { batch.needs_unlock = false; - self.rc - .accounts - .unlock_accounts(batch.transactions_iter(), batch.lock_results()) + self.rc.accounts.unlock_accounts( + batch.transactions_iter(), + batch.lock_results(), + self.demote_sysvar_write_locks(), + ) } } @@ -2764,7 +2813,7 @@ impl Bank { .check_hash_age(hash, max_age) } - pub fn check_tx_durable_nonce(&self, tx: &Transaction) -> Option<(Pubkey, Account)> { + pub fn check_tx_durable_nonce(&self, tx: &Transaction) -> Option<(Pubkey, AccountSharedData)> { transaction::uses_durable_nonce(&tx) .and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(&nonce_ix, &tx)) .and_then(|nonce_pubkey| { @@ -2903,7 +2952,7 @@ impl Bank { } } - /// Converts Accounts into RefCell, this involves moving + /// Converts Accounts into RefCell, this involves moving /// ownership by draining the source fn accounts_to_refcells( accounts: &mut TransactionAccounts, @@ -2920,37 +2969,46 @@ impl Bank { .collect(); let account_dep_refcells: Vec<_> = account_deps .drain(..) - .map(|(pubkey, account_dep)| (pubkey, RefCell::new(account_dep))) + .map(|(pubkey, account_dep)| (pubkey, Rc::new(RefCell::new(account_dep)))) .collect(); let loader_refcells: Vec> = loaders .iter_mut() .map(|v| { v.drain(..) 
- .map(|(pubkey, account)| (pubkey, RefCell::new(account))) + .map(|(pubkey, account)| (pubkey, Rc::new(RefCell::new(account)))) .collect() }) .collect(); (account_refcells, account_dep_refcells, loader_refcells) } - /// Converts back from RefCell to Account, this involves moving + /// Converts back from RefCell to AccountSharedData, this involves moving /// ownership by draining the sources fn refcells_to_accounts( accounts: &mut TransactionAccounts, loaders: &mut TransactionLoaders, mut account_refcells: TransactionAccountRefCells, loader_refcells: TransactionLoaderRefCells, - ) { - account_refcells.drain(..).for_each(|account_refcell| { - accounts.push(Rc::try_unwrap(account_refcell).unwrap().into_inner()) - }); - loaders - .iter_mut() - .zip(loader_refcells) - .for_each(|(ls, mut lrcs)| { - lrcs.drain(..) - .for_each(|(pubkey, lrc)| ls.push((pubkey, lrc.into_inner()))) - }); + ) -> std::result::Result<(), TransactionError> { + for account_refcell in account_refcells.drain(..) { + accounts.push( + Rc::try_unwrap(account_refcell) + .map_err(|_| TransactionError::AccountBorrowOutstanding)? + .into_inner(), + ) + } + for (ls, mut lrcs) in loaders.iter_mut().zip(loader_refcells) { + for (pubkey, lrc) in lrcs.drain(..) { + ls.push(( + pubkey, + Rc::try_unwrap(lrc) + .map_err(|_| TransactionError::AccountBorrowOutstanding)? 
+ .into_inner(), + )) + } + } + + Ok(()) } fn compile_recorded_instructions( @@ -2970,7 +3028,7 @@ impl Bank { fn get_executors( &self, message: &Message, - loaders: &[Vec<(Pubkey, Account)>], + loaders: &[Vec<(Pubkey, AccountSharedData)>], ) -> Rc> { let mut num_executors = message.account_keys.len(); for instruction_loaders in loaders.iter() { @@ -3095,19 +3153,23 @@ impl Bank { let mut transaction_log_messages = Vec::with_capacity(hashed_txs.len()); let bpf_compute_budget = self .bpf_compute_budget - .unwrap_or_else(|| BpfComputeBudget::new(&self.feature_set)); + .unwrap_or_else(BpfComputeBudget::new); let executed: Vec = loaded_accounts .iter_mut() .zip(hashed_txs.as_transactions_iter()) .map(|(accs, tx)| match accs { (Err(e), _nonce_rollback) => (Err(e.clone()), None), - (Ok((accounts, account_deps, loaders, _rents)), nonce_rollback) => { + (Ok(loaded_transaction), nonce_rollback) => { signature_count += u64::from(tx.message().header.num_required_signatures); + let executors = self.get_executors(&tx.message, &loaded_transaction.loaders); - let executors = self.get_executors(&tx.message, &loaders); let (account_refcells, account_dep_refcells, loader_refcells) = - Self::accounts_to_refcells(accounts, account_deps, loaders); + Self::accounts_to_refcells( + &mut loaded_transaction.accounts, + &mut loaded_transaction.account_deps, + &mut loaded_transaction.loaders, + ); let instruction_recorders = if enable_cpi_recording { let ix_count = tx.message.instructions.len(); @@ -3144,7 +3206,7 @@ impl Bank { None }; - let process_result = self.message_processor.process_message( + let mut process_result = self.message_processor.process_message( tx.message(), &loader_refcells, &account_refcells, @@ -3155,6 +3217,9 @@ impl Bank { instruction_recorders.as_deref(), self.feature_set.clone(), bpf_compute_budget, + &mut timings.details, + self.rc.accounts.clone(), + &self.ancestors, evm_executor.as_mut(), ); @@ -3177,12 +3242,15 @@ impl Bank { &tx.message, ); - 
Self::refcells_to_accounts( - accounts, - loaders, + if let Err(e) = Self::refcells_to_accounts( + &mut loaded_transaction.accounts, + &mut loaded_transaction.loaders, account_refcells, loader_refcells, - ); + ) { + warn!("Account lifetime mismanagement"); + process_result = Err(e); + } if process_result.is_ok() { self.update_executors(executors); @@ -3410,8 +3478,9 @@ impl Bank { &self.rent_collector, &self.last_blockhash_with_fee_calculator(), self.fix_recent_blockhashes_sysvar_delay(), + self.demote_sysvar_write_locks(), ); - self.collect_rent(executed, loaded_accounts); + let rent_debits = self.collect_rent(executed, loaded_accounts); let overwritten_vote_accounts = self.update_cached_accounts( hashed_txs.as_transactions_iter(), @@ -3443,6 +3512,7 @@ impl Bank { fee_collection_results, execution_results: executed.to_vec(), overwritten_vote_accounts, + rent_debits, } } @@ -3590,27 +3660,31 @@ impl Bank { fn collect_rent( &self, res: &[TransactionExecutionResult], - loaded_accounts: &[TransactionLoadResult], - ) { + loaded_accounts: &mut [TransactionLoadResult], + ) -> Vec { let mut collected_rent: u64 = 0; - for (i, (raccs, _nonce_rollback)) in loaded_accounts.iter().enumerate() { + let mut rent_debits: Vec = Vec::with_capacity(loaded_accounts.len()); + for (i, (raccs, _nonce_rollback)) in loaded_accounts.iter_mut().enumerate() { let (res, _nonce_rollback) = &res[i]; if res.is_err() || raccs.is_err() { + rent_debits.push(RentDebits::default()); continue; } - let acc = raccs.as_ref().unwrap(); + let loaded_transaction = raccs.as_mut().unwrap(); - collected_rent += acc.3; + collected_rent += loaded_transaction.rent; + rent_debits.push(mem::take(&mut loaded_transaction.rent_debits)); } self.collected_rent.fetch_add(collected_rent, Relaxed); + rent_debits } fn run_incinerator(&self) { if let Some((account, _)) = self.get_account_modified_since_parent(&incinerator::id()) { self.capitalization.fetch_sub(account.lamports, Relaxed); - 
self.store_account(&incinerator::id(), &Account::default()); + self.store_account(&incinerator::id(), &AccountSharedData::default()); } } @@ -3671,16 +3745,20 @@ impl Bank { let account_count = accounts.len(); // parallelize? - let mut rent = 0; + let mut total_rent = 0; + let mut rent_debits = RentDebits::default(); for (pubkey, mut account) in accounts { - rent += self + let rent = self .rent_collector .collect_from_existing_account(&pubkey, &mut account); + total_rent += rent; // Store all of them unconditionally to purge old AppendVec, // even if collected rent is 0 (= not updated). self.store_account(&pubkey, &account); + rent_debits.push(&pubkey, rent, account.lamports()); } - self.collected_rent.fetch_add(rent, Relaxed); + self.collected_rent.fetch_add(total_rent, Relaxed); + self.rewards.write().unwrap().append(&mut rent_debits.0); datapoint_info!("collect_rent_eagerly", ("accounts", account_count, i64)); } @@ -3841,8 +3919,8 @@ impl Bank { let cycle_params = self.determine_collection_cycle_params(epoch); let (_, _, in_multi_epoch_cycle, _, _, partition_count) = cycle_params; - // use common code-path for both very-likely and very-unlikely for the sake of minimized - // risk of any mis-calculation instead of neligilbe faster computation per slot for the + // use common codepath for both very likely and very unlikely for the sake of minimized + // risk of any miscalculation instead of negligibly faster computation per slot for the // likely case. let mut start_partition_index = Self::partition_index_from_slot_index(start_slot_index, cycle_params); @@ -3854,7 +3932,7 @@ impl Bank { let in_middle_of_cycle = start_partition_index > 0; if in_multi_epoch_cycle && is_special_new_epoch && in_middle_of_cycle { // Adjust slot indexes so that the final partition ranges are continuous! - // This is neeed because the caller gives us off-by-one indexes when + // This is need because the caller gives us off-by-one indexes when // an epoch boundary is crossed. 
// Usually there is no need for this adjustment because cycles are aligned // with epochs. But for multi-epoch cycles, adjust the indexes if it @@ -4130,7 +4208,7 @@ impl Bank { self.process_transaction(&tx).map(|_| signature) } - pub fn read_balance(account: &Account) -> u64 { + pub fn read_balance(account: &AccountSharedData) -> u64 { account.lamports } /// Each program would need to be able to introspect its own state @@ -4159,7 +4237,7 @@ impl Bank { parents } - pub fn store_account(&self, pubkey: &Pubkey, account: &Account) { + pub fn store_account(&self, pubkey: &Pubkey, account: &AccountSharedData) { assert!(!self.freeze_started()); self.rc .accounts @@ -4195,7 +4273,11 @@ impl Bank { /// Technically this issues (or even burns!) new lamports, /// so be extra careful for its usage - fn store_account_and_update_capitalization(&self, pubkey: &Pubkey, new_account: &Account) { + fn store_account_and_update_capitalization( + &self, + pubkey: &Pubkey, + new_account: &AccountSharedData, + ) { if let Some(old_account) = self.get_account(&pubkey) { match new_account.lamports.cmp(&old_account.lamports) { std::cmp::Ordering::Greater => { @@ -4215,7 +4297,7 @@ impl Bank { self.store_account(pubkey, new_account); } - pub fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> { + fn withdraw(&self, pubkey: &Pubkey, lamports: u64) -> Result<()> { match self.get_account(pubkey) { Some(mut account) => { let min_balance = match get_system_account_kind(&account) { @@ -4225,9 +4307,11 @@ impl Bank { .minimum_balance(nonce::State::size()), _ => 0, }; - if lamports + min_balance > account.lamports { - return Err(TransactionError::InsufficientFundsForFee); - } + + lamports + .checked_add(min_balance) + .filter(|required_balance| *required_balance <= account.lamports()) + .ok_or(TransactionError::InsufficientFundsForFee)?; account.lamports -= lamports; self.store_account(pubkey, &account); @@ -4292,12 +4376,12 @@ impl Bank { self.hard_forks.clone() } - pub fn 
get_account(&self, pubkey: &Pubkey) -> Option { + pub fn get_account(&self, pubkey: &Pubkey) -> Option { self.get_account_modified_slot(pubkey) .map(|(acc, _slot)| acc) } - pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(Account, Slot)> { + pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.rc.accounts.load_slow(&self.ancestors, pubkey) } @@ -4308,7 +4392,7 @@ impl Bank { // multiple times with the same parent_slot in the case of forking. // // Generally, all of sysvar update granularity should be slot boundaries. - fn get_sysvar_account(&self, pubkey: &Pubkey) -> Option { + fn get_sysvar_account(&self, pubkey: &Pubkey) -> Option { let mut ancestors = self.ancestors.clone(); ancestors.remove(&self.slot()); self.rc @@ -4317,40 +4401,44 @@ impl Bank { .map(|(acc, _slot)| acc) } - pub fn get_program_accounts(&self, program_id: &Pubkey) -> Vec<(Pubkey, Account)> { + pub fn get_program_accounts(&self, program_id: &Pubkey) -> Vec<(Pubkey, AccountSharedData)> { self.rc .accounts .load_by_program(&self.ancestors, program_id) } - pub fn get_filtered_program_accounts bool>( + pub fn get_filtered_program_accounts bool>( &self, program_id: &Pubkey, filter: F, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.rc .accounts .load_by_program_with_filter(&self.ancestors, program_id, filter) } - pub fn get_filtered_indexed_accounts bool>( + pub fn get_filtered_indexed_accounts bool>( &self, index_key: &IndexKey, filter: F, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.rc .accounts .load_by_index_key_with_filter(&self.ancestors, index_key, filter) } - pub fn get_all_accounts_with_modified_slots(&self) -> Vec<(Pubkey, Account, Slot)> { + pub fn account_indexes_include_key(&self, key: &Pubkey) -> bool { + self.rc.accounts.account_indexes_include_key(key) + } + + pub fn get_all_accounts_with_modified_slots(&self) -> Vec<(Pubkey, AccountSharedData, Slot)> { 
self.rc.accounts.load_all(&self.ancestors) } pub fn get_program_accounts_modified_since_parent( &self, program_id: &Pubkey, - ) -> Vec<(Pubkey, Account)> { + ) -> Vec<(Pubkey, AccountSharedData)> { self.rc .accounts .load_by_program_slot(self.slot(), Some(program_id)) @@ -4376,11 +4464,14 @@ impl Bank { } } - pub fn get_all_accounts_modified_since_parent(&self) -> Vec<(Pubkey, Account)> { + pub fn get_all_accounts_modified_since_parent(&self) -> Vec<(Pubkey, AccountSharedData)> { self.rc.accounts.load_by_program_slot(self.slot(), None) } - pub fn get_account_modified_since_parent(&self, pubkey: &Pubkey) -> Option<(Account, Slot)> { + pub fn get_account_modified_since_parent( + &self, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { let just_self: Ancestors = vec![(self.slot(), 0)].into_iter().collect(); if let Some((account, slot)) = self.rc.accounts.load_slow(&just_self, pubkey) { if slot == self.slot() { @@ -4601,7 +4692,21 @@ impl Bank { &self.ancestors, Some(self.capitalization()), ); - assert_eq!(total_lamports, self.capitalization()); + if total_lamports != self.capitalization() { + datapoint_info!( + "capitalization_mismatch", + ("slot", self.slot(), i64), + ("calculated_lamports", total_lamports, i64), + ("capitalization", self.capitalization(), i64), + ); + + panic!( + "capitalization_mismatch. slot: {}, calculated_lamports: {}, capitalization: {}", + self.slot(), + total_lamports, + self.capitalization() + ); + } hash } @@ -4612,12 +4717,36 @@ impl Bank { /// A snapshot bank should be purged of 0 lamport accounts which are not part of the hash /// calculation and could shield other real accounts. 
pub fn verify_snapshot_bank(&self) -> bool { + let mut clean_time = Measure::start("clean"); if self.slot() > 0 { self.clean_accounts(true); + } + clean_time.stop(); + + let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); + if self.slot() > 0 { self.shrink_all_slots(); } + shrink_all_slots_time.stop(); + + let mut verify_time = Measure::start("verify_bank_hash"); + let mut verify = self.verify_bank_hash(); + verify_time.stop(); + + let mut verify2_time = Measure::start("verify_hash"); // Order and short-circuiting is significant; verify_hash requires a valid bank hash - self.verify_bank_hash() && self.verify_hash() + verify = verify && self.verify_hash(); + verify2_time.stop(); + + datapoint_info!( + "verify_snapshot_bank", + ("clean_us", clean_time.as_us(), i64), + ("shrink_all_slots_us", shrink_all_slots_time.as_us(), i64), + ("verify_bank_hash_us", verify_time.as_us(), i64), + ("verify_hash_us", verify2_time.as_us(), i64), + ); + + verify } /// Return the number of hashes per tick @@ -4645,6 +4774,10 @@ impl Bank { *self.inflation.read().unwrap() } + pub fn rent_collector(&self) -> RentCollector { + self.rent_collector.clone() + } + /// Return the total capitalization of the Bank pub fn capitalization(&self) -> u64 { self.capitalization.load(Relaxed) @@ -4686,12 +4819,12 @@ impl Bank { } let message = &tx.message(); - let acc = raccs.as_ref().unwrap(); + let loaded_transaction = raccs.as_ref().unwrap(); for (pubkey, account) in message .account_keys .iter() - .zip(acc.0.iter()) + .zip(loaded_transaction.accounts.iter()) .filter(|(_key, account)| (Stakes::is_stake(account))) { if Stakes::is_stake(account) { @@ -4946,26 +5079,6 @@ impl Bank { bank_creation_time.elapsed().as_nanos() <= max_tx_ingestion_nanos } - fn get_unlock_switch_vote_slot(cluster_type: ClusterType) -> Slot { - match cluster_type { - ClusterType::Development => 0, - ClusterType::Devnet => 0, - // Epoch 63 - ClusterType::Testnet => 21_692_256, - // 400_000 slots into epoch 61 - 
ClusterType::MainnetBeta => 26_752_000, - } - } - - pub fn unlock_switch_vote(&self) -> bool { - let solana_unlock_switch = - self.slot() > Self::get_unlock_switch_vote_slot(self.cluster_type()); - let velas_unlock_switch = self - .feature_set - .is_active(&feature_set::velas_hardfork_pack::id()); - solana_unlock_switch || velas_unlock_switch - } - pub fn deactivate_feature(&mut self, id: &Pubkey) { let mut feature_set = Arc::make_mut(&mut self.feature_set).clone(); feature_set.active.remove(&id); @@ -5108,7 +5221,7 @@ impl Bank { // Clear new token account self.store_account( &inline_spl_token_v2_0::new_token_program::id(), - &Account::default(), + &AccountSharedData::default(), ); self.remove_executor(&inline_spl_token_v2_0::id()); @@ -5126,13 +5239,13 @@ impl Bank { // It's okay if we trigget two activations sequentionally. if reconfigure_token2_native_mint_old || reconfigure_token2_native_mint_velas { - let mut native_mint_account = solana_sdk::account::Account { + let mut native_mint_account = solana_sdk::account::AccountSharedData::from(Account { owner: inline_spl_token_v2_0::id(), data: inline_spl_token_v2_0::native_mint::ACCOUNT_DATA.to_vec(), lamports: sol_to_lamports(1.), executable: false, rent_epoch: self.epoch() + 1, - }; + }); // As a workaround for // https://github.com/solana-labs/solana-program-library/issues/374, ensure that the @@ -5262,7 +5375,9 @@ pub(crate) mod tests { use super::*; use crate::{ accounts_db::SHRINK_RATIO, - accounts_index::{AccountMap, Ancestors, ITER_BATCH_SIZE}, + accounts_index::{ + AccountIndex, AccountMap, AccountSecondaryIndexes, Ancestors, ITER_BATCH_SIZE, + }, genesis_utils::{ activate_all_features, bootstrap_validator_stake_lamports, create_genesis_config_with_leader, create_genesis_config_with_vote_accounts, @@ -5274,6 +5389,7 @@ pub(crate) mod tests { use crossbeam_channel::bounded; use evm_state::H256; use solana_sdk::{ + account::Account, account_utils::StateMut, clock::{DEFAULT_SLOTS_PER_EPOCH, 
DEFAULT_TICKS_PER_SLOT}, epoch_schedule::MINIMUM_SLOTS_PER_EPOCH, @@ -5316,7 +5432,7 @@ pub(crate) mod tests { blockhash: Hash::new_unique(), fee_calculator: fee_calculator.clone(), })); - let nonce_account = Account::new_data(43, &state, &system_program::id()).unwrap(); + let nonce_account = AccountSharedData::new_data(43, &state, &system_program::id()).unwrap(); // NonceRollbackPartial create + NonceRollbackInfo impl let partial = NonceRollbackPartial::new(nonce_address, nonce_account.clone()); @@ -5334,9 +5450,9 @@ pub(crate) mod tests { ]; let message = Message::new(&instructions, Some(&from_address)); - let from_account = Account::new(44, 0, &Pubkey::default()); - let to_account = Account::new(45, 0, &Pubkey::default()); - let recent_blockhashes_sysvar_account = Account::new(4, 0, &Pubkey::default()); + let from_account = AccountSharedData::new(44, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(45, 0, &Pubkey::default()); + let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); let accounts = [ from_account.clone(), nonce_account.clone(), @@ -5421,7 +5537,7 @@ pub(crate) mod tests { ); let rent_account = bank.get_account(&sysvar::rent::id()).unwrap(); - let rent = from_account::(&rent_account).unwrap(); + let rent = from_account::(&rent_account).unwrap(); assert_eq!(rent.burn_percent, 5); assert_eq!(rent.exemption_threshold, 1.2); @@ -5513,7 +5629,7 @@ pub(crate) mod tests { cluster_type: ClusterType::MainnetBeta, ..GenesisConfig::default() })); - let sysvar_and_native_proram_delta0 = 12; + let sysvar_and_native_proram_delta0 = 10; assert_eq!( bank0.capitalization(), 42 * 42 + sysvar_and_native_proram_delta0 @@ -5571,12 +5687,12 @@ pub(crate) mod tests { assert_eq!(bank.last_blockhash(), genesis_config.hash()); // Initialize credit-debit and credit only accounts - let account1 = Account::new(264, 0, &Pubkey::default()); - let account2 = Account::new(264, 1, &Pubkey::default()); - let account3 = 
Account::new(264, 0, &Pubkey::default()); - let account4 = Account::new(264, 1, &Pubkey::default()); - let account5 = Account::new(10, 0, &Pubkey::default()); - let account6 = Account::new(10, 1, &Pubkey::default()); + let account1 = AccountSharedData::new(264, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(264, 1, &Pubkey::default()); + let account3 = AccountSharedData::new(264, 0, &Pubkey::default()); + let account4 = AccountSharedData::new(264, 1, &Pubkey::default()); + let account5 = AccountSharedData::new(10, 0, &Pubkey::default()); + let account6 = AccountSharedData::new(10, 1, &Pubkey::default()); bank.store_account(&keypair1.pubkey(), &account1); bank.store_account(&keypair2.pubkey(), &account2); @@ -5596,7 +5712,7 @@ pub(crate) mod tests { let system_program_id = system_program::id(); let mut system_program_account = bank.get_account(&system_program_id).unwrap(); system_program_account.lamports = - bank.get_minimum_balance_for_rent_exemption(system_program_account.data.len()); + bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()); bank.store_account(&system_program_id, &system_program_account); bank_with_success_txs.store_account(&system_program_id, &system_program_account); @@ -5671,8 +5787,11 @@ pub(crate) mod tests { AccountMeta::new(keypair2.pubkey(), true), AccountMeta::new_readonly(read_only_keypair.pubkey(), false), ]; - let deduct_instruction = - Instruction::new(mock_program_id, &MockInstruction::Deduction, account_metas); + let deduct_instruction = Instruction::new_with_bincode( + mock_program_id, + &MockInstruction::Deduction, + account_metas, + ); Transaction::new_signed_with_payer( &[deduct_instruction], Some(&payer.pubkey()), @@ -5687,10 +5806,11 @@ pub(crate) mod tests { mock_program_id: Pubkey, generic_rent_due_for_system_account: u64, ) { - let mut account_pairs: Vec<(Pubkey, Account)> = Vec::with_capacity(keypairs.len() - 1); + let mut account_pairs: Vec<(Pubkey, AccountSharedData)> = + 
Vec::with_capacity(keypairs.len() - 1); account_pairs.push(( keypairs[0].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 2, 0, &Pubkey::default(), @@ -5698,7 +5818,7 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[1].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 2, 0, &Pubkey::default(), @@ -5706,7 +5826,7 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[2].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 2, 0, &Pubkey::default(), @@ -5714,7 +5834,7 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[3].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 2, 0, &Pubkey::default(), @@ -5722,15 +5842,15 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[4].pubkey(), - Account::new(10, 0, &Pubkey::default()), + AccountSharedData::new(10, 0, &Pubkey::default()), )); account_pairs.push(( keypairs[5].pubkey(), - Account::new(10, 0, &Pubkey::default()), + AccountSharedData::new(10, 0, &Pubkey::default()), )); account_pairs.push(( keypairs[6].pubkey(), - Account::new( + AccountSharedData::new( (2 * generic_rent_due_for_system_account) + 24, 0, &Pubkey::default(), @@ -5739,7 +5859,7 @@ pub(crate) mod tests { account_pairs.push(( keypairs[8].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 2 + 929, 0, &Pubkey::default(), @@ -5747,13 +5867,13 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[9].pubkey(), - Account::new(10, 0, &Pubkey::default()), + AccountSharedData::new(10, 0, &Pubkey::default()), )); // Feeding to MockProgram to test read only rent behaviour account_pairs.push(( keypairs[10].pubkey(), - Account::new( + AccountSharedData::new( generic_rent_due_for_system_account + 3, 0, &Pubkey::default(), @@ -5761,15 +5881,15 @@ pub(crate) mod tests { )); account_pairs.push(( keypairs[11].pubkey(), - 
Account::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), + AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), )); account_pairs.push(( keypairs[12].pubkey(), - Account::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), + AccountSharedData::new(generic_rent_due_for_system_account + 3, 0, &mock_program_id), )); account_pairs.push(( keypairs[13].pubkey(), - Account::new(14, 22, &mock_program_id), + AccountSharedData::new(14, 22, &mock_program_id), )); for account_pair in account_pairs.iter() { @@ -5812,7 +5932,7 @@ pub(crate) mod tests { let pubkey = solana_sdk::pubkey::new_rand(); let some_lamports = 400; - let account = Account::new(some_lamports, 0, &system_program::id()); + let account = AccountSharedData::new(some_lamports, 0, &system_program::id()); assert_capitalization_diff( &bank, @@ -5830,7 +5950,7 @@ pub(crate) mod tests { let pubkey = mint_keypair.pubkey(); let new_lamports = 500; - let account = Account::new(new_lamports, 0, &system_program::id()); + let account = AccountSharedData::new(new_lamports, 0, &system_program::id()); assert_capitalization_diff( &bank, @@ -5848,7 +5968,7 @@ pub(crate) mod tests { let pubkey = mint_keypair.pubkey(); let new_lamports = 100; - let account = Account::new(new_lamports, 0, &system_program::id()); + let account = AccountSharedData::new(new_lamports, 0, &system_program::id()); assert_capitalization_diff( &bank, @@ -5865,7 +5985,7 @@ pub(crate) mod tests { let bank = Bank::new(&genesis_config); let pubkey = mint_keypair.pubkey(); - let account = Account::new(lamports, 1, &system_program::id()); + let account = AccountSharedData::new(lamports, 1, &system_program::id()); assert_capitalization_diff( &bank, @@ -5928,11 +6048,11 @@ pub(crate) mod tests { ); genesis_config.accounts.insert( validator_1_staking_keypair.pubkey(), - validator_1_stake_account, + Account::from(validator_1_stake_account), ); genesis_config.accounts.insert( 
validator_1_voting_keypair.pubkey(), - validator_1_vote_account, + Account::from(validator_1_vote_account), ); let validator_2_pubkey = solana_sdk::pubkey::new_rand(); @@ -5961,11 +6081,11 @@ pub(crate) mod tests { ); genesis_config.accounts.insert( validator_2_staking_keypair.pubkey(), - validator_2_stake_account, + Account::from(validator_2_stake_account), ); genesis_config.accounts.insert( validator_2_voting_keypair.pubkey(), - validator_2_vote_account, + Account::from(validator_2_vote_account), ); let validator_3_pubkey = solana_sdk::pubkey::new_rand(); @@ -5994,11 +6114,11 @@ pub(crate) mod tests { ); genesis_config.accounts.insert( validator_3_staking_keypair.pubkey(), - validator_3_stake_account, + Account::from(validator_3_stake_account), ); genesis_config.accounts.insert( validator_3_voting_keypair.pubkey(), - validator_3_vote_account, + Account::from(validator_3_vote_account), ); genesis_config.rent = Rent { @@ -6013,11 +6133,11 @@ pub(crate) mod tests { bank.rent_collector.slots_per_year = 192.0; let payer = Keypair::new(); - let payer_account = Account::new(400, 0, &system_program::id()); + let payer_account = AccountSharedData::new(400, 0, &system_program::id()); bank.store_account_and_update_capitalization(&payer.pubkey(), &payer_account); let payee = Keypair::new(); - let payee_account = Account::new(70, 1, &system_program::id()); + let payee_account = AccountSharedData::new(70, 1, &system_program::id()); bank.store_account_and_update_capitalization(&payee.pubkey(), &payee_account); let bootstrap_validator_initial_balance = bank.get_balance(&bootstrap_validator_pubkey); @@ -6094,7 +6214,7 @@ pub(crate) mod tests { let sysvar_and_native_proram_delta = 1; assert_eq!( - previous_capitalization - current_capitalization + sysvar_and_native_proram_delta, + previous_capitalization - (current_capitalization - sysvar_and_native_proram_delta), burned_portion ); @@ -6107,13 +6227,17 @@ pub(crate) mod tests { .unwrap() .iter() .map(|(address, reward)| { - 
assert_eq!(reward.reward_type, RewardType::Rent); - if *address == validator_2_pubkey { - assert_eq!(reward.post_balance, validator_2_portion + 42 - tweak_2); - } else if *address == validator_3_pubkey { - assert_eq!(reward.post_balance, validator_3_portion + 42); + if reward.lamports > 0 { + assert_eq!(reward.reward_type, RewardType::Rent); + if *address == validator_2_pubkey { + assert_eq!(reward.post_balance, validator_2_portion + 42 - tweak_2); + } else if *address == validator_3_pubkey { + assert_eq!(reward.post_balance, validator_3_portion + 42); + } + reward.lamports as u64 + } else { + 0 } - reward.lamports as u64 }) .sum::() ); @@ -6181,7 +6305,8 @@ pub(crate) mod tests { let account_pubkey = solana_sdk::pubkey::new_rand(); let account_balance = 1; - let mut account = Account::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); + let mut account = + AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); account.executable = true; bank.store_account(&account_pubkey, &account); @@ -6352,7 +6477,7 @@ pub(crate) mod tests { // Then, at store time we deducted `magic_rent_number` rent for the current epoch, once it has balance assert_eq!(account10.rent_epoch, bank.epoch + 1); // account data is blank now - assert_eq!(account10.data.len(), 0); + assert_eq!(account10.data().len(), 0); // 10 - 10(Rent) + 929(Transfer) - magic_rent_number(Rent) assert_eq!(account10.lamports, 929 - magic_rent_number); rent_collected += magic_rent_number + 10; @@ -6625,7 +6750,7 @@ pub(crate) mod tests { } #[test] - fn test_rent_eager_under_fixed_cycle_for_developemnt() { + fn test_rent_eager_under_fixed_cycle_for_development() { solana_logger::setup(); let leader_pubkey = solana_sdk::pubkey::new_rand(); let leader_lamports = 3; @@ -6771,11 +6896,11 @@ pub(crate) mod tests { let max_exact = 64; // Make sure `max_exact` divides evenly when calculating `calculate_partition_width` assert!(should_cause_overflow(max_exact)); - // Make sure `max_unexact` doesn't 
divide evenly when calculating `calculate_partition_width` - let max_unexact = 10; - assert!(!should_cause_overflow(max_unexact)); + // Make sure `max_inexact` doesn't divide evenly when calculating `calculate_partition_width` + let max_inexact = 10; + assert!(!should_cause_overflow(max_inexact)); - for max in &[max_exact, max_unexact] { + for max in &[max_exact, max_inexact] { let range = Bank::pubkey_range_from_partition((max - 1, max - 1, *max)); assert_eq!( range, @@ -7020,15 +7145,15 @@ pub(crate) mod tests { bank.store_account( &zero_lamport_pubkey, - &Account::new(zero_lamports, 0, &Pubkey::default()), + &AccountSharedData::new(zero_lamports, 0, &Pubkey::default()), ); bank.store_account( &rent_due_pubkey, - &Account::new(little_lamports, 0, &Pubkey::default()), + &AccountSharedData::new(little_lamports, 0, &Pubkey::default()), ); bank.store_account( &rent_exempt_pubkey, - &Account::new(large_lamports, 0, &Pubkey::default()), + &AccountSharedData::new(large_lamports, 0, &Pubkey::default()), ); let genesis_slot = 0; @@ -7098,7 +7223,7 @@ pub(crate) mod tests { let bank1_without_zero = Arc::new(new_from_parent(&genesis_bank2)); let zero_lamports = 0; - let account = Account::new(zero_lamports, 0, &Pubkey::default()); + let account = AccountSharedData::new(zero_lamports, 0, &Pubkey::default()); bank1_with_zero.store_account(&zero_lamport_pubkey, &account); bank1_without_zero.store_account(&zero_lamport_pubkey, &account); @@ -7237,7 +7362,7 @@ pub(crate) mod tests { let rewards = bank1 .get_account(&sysvar::rewards::id()) - .map(|account| from_account::(&account).unwrap()) + .map(|account| from_account::(&account).unwrap()) .unwrap(); // verify the stake and vote accounts are the right size @@ -7669,7 +7794,7 @@ pub(crate) mod tests { let min_balance = bank.get_minimum_balance_for_rent_exemption(nonce::State::size()); let nonce = Keypair::new(); - let nonce_account = Account::new_data( + let nonce_account = AccountSharedData::new_data( min_balance + 42, 
&nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), @@ -8803,6 +8928,7 @@ pub(crate) mod tests { use sysvar::clock::Clock; let dummy_clock_id = solana_sdk::pubkey::new_rand(); + let dummy_rent_epoch = 44; let (mut genesis_config, _mint_keypair) = create_genesis_config(500); let expected_previous_slot = 3; @@ -8819,19 +8945,22 @@ pub(crate) mod tests { bank1.update_sysvar_account(&dummy_clock_id, |optional_account| { assert!(optional_account.is_none()); - create_account( + let mut account = create_account( &Clock { slot: expected_previous_slot, ..Clock::default() }, bank1.inherit_specially_retained_account_fields(optional_account), - ) + ); + account.rent_epoch = dummy_rent_epoch; + account }); let current_account = bank1.get_account(&dummy_clock_id).unwrap(); assert_eq!( expected_previous_slot, - from_account::(¤t_account).unwrap().slot + from_account::(¤t_account).unwrap().slot ); + assert_eq!(dummy_rent_epoch, current_account.rent_epoch); }, |old, new| { assert_eq!(old + 1, new); @@ -8865,7 +8994,7 @@ pub(crate) mod tests { &bank2, || { bank2.update_sysvar_account(&dummy_clock_id, |optional_account| { - let slot = from_account::(optional_account.as_ref().unwrap()) + let slot = from_account::(optional_account.as_ref().unwrap()) .unwrap() .slot + 1; @@ -8881,8 +9010,9 @@ pub(crate) mod tests { let current_account = bank2.get_account(&dummy_clock_id).unwrap(); assert_eq!( expected_next_slot, - from_account::(¤t_account).unwrap().slot + from_account::(¤t_account).unwrap().slot ); + assert_eq!(INITIAL_RENT_EPOCH, current_account.rent_epoch); }, |old, new| { // if existing, capitalization shouldn't change @@ -8896,7 +9026,7 @@ pub(crate) mod tests { &bank2, || { bank2.update_sysvar_account(&dummy_clock_id, |optional_account| { - let slot = from_account::(optional_account.as_ref().unwrap()) + let slot = from_account::(optional_account.as_ref().unwrap()) .unwrap() .slot + 1; @@ -8912,7 +9042,7 @@ pub(crate) mod tests { let 
current_account = bank2.get_account(&dummy_clock_id).unwrap(); assert_eq!( expected_next_slot, - from_account::(¤t_account).unwrap().slot + from_account::(¤t_account).unwrap().slot ); }, |old, new| { @@ -9267,7 +9397,7 @@ pub(crate) mod tests { let bank = Arc::new(Bank::new(&genesis_config)); let fees_account = bank.get_account(&sysvar::fees::id()).unwrap(); - let fees = from_account::(&fees_account).unwrap(); + let fees = from_account::(&fees_account).unwrap(); assert_eq!( bank.fee_calculator.lamports_per_signature, fees.fee_calculator.lamports_per_signature @@ -9333,7 +9463,7 @@ pub(crate) mod tests { let bank0 = Arc::new(new_from_parent(&parent)); let pubkey0 = solana_sdk::pubkey::new_rand(); let program_id = Pubkey::new(&[2; 32]); - let account0 = Account::new(1, 0, &program_id); + let account0 = AccountSharedData::new(1, 0, &program_id); bank0.store_account(&pubkey0, &account0); assert_eq!( @@ -9358,11 +9488,11 @@ pub(crate) mod tests { let bank2 = Arc::new(new_from_parent(&bank1)); let pubkey1 = solana_sdk::pubkey::new_rand(); - let account1 = Account::new(3, 0, &program_id); + let account1 = AccountSharedData::new(3, 0, &program_id); bank2.store_account(&pubkey1, &account1); // Accounts with 0 lamports should be filtered out by Accounts::load_by_program() let pubkey2 = solana_sdk::pubkey::new_rand(); - let account2 = Account::new(0, 0, &program_id); + let account2 = AccountSharedData::new(0, 0, &program_id); bank2.store_account(&pubkey2, &account2); let bank3 = Arc::new(new_from_parent(&bank2)); @@ -9374,8 +9504,8 @@ pub(crate) mod tests { #[test] fn test_get_filtered_indexed_accounts() { let (genesis_config, _mint_keypair) = create_genesis_config(500); - let mut account_indexes = HashSet::new(); - account_indexes.insert(AccountIndex::ProgramId); + let mut account_indexes = AccountSecondaryIndexes::default(); + account_indexes.indexes.insert(AccountIndex::ProgramId); let bank = Arc::new(Bank::new_with_config( &genesis_config, account_indexes, @@ -9384,7 
+9514,7 @@ pub(crate) mod tests { let address = Pubkey::new_unique(); let program_id = Pubkey::new_unique(); - let account = Account::new(1, 0, &program_id); + let account = AccountSharedData::new(1, 0, &program_id); bank.store_account(&address, &account); let indexed_accounts = @@ -9396,7 +9526,7 @@ pub(crate) mod tests { // it is still present in the index under the original program id as well. This // demonstrates the need for a redundant post-processing filter. let another_program_id = Pubkey::new_unique(); - let new_account = Account::new(1, 0, &another_program_id); + let new_account = AccountSharedData::new(1, 0, &another_program_id); let bank = Arc::new(new_from_parent(&bank)); bank.store_account(&address, &new_account); let indexed_accounts = @@ -9543,7 +9673,7 @@ pub(crate) mod tests { ); let new_vote_loader_account = bank.get_account(&solana_vote_program::id()).unwrap(); // Vote loader account should not be updated since it was included in the genesis config. - assert_eq!(vote_loader_account.data, new_vote_loader_account.data); + assert_eq!(vote_loader_account.data(), new_vote_loader_account.data()); assert_eq!( bank.process_transaction(&transaction), Err(TransactionError::InstructionError( @@ -9589,11 +9719,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data) + String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data) + String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) ); // Re-adding builtin programs should be no-op @@ -9609,11 +9739,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data) + 
String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data) + String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) ); } @@ -9624,7 +9754,7 @@ pub(crate) mod tests { for i in 1..5 { let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); let recent_blockhashes = - from_account::(&bhq_account) + from_account::(&bhq_account) .unwrap(); // Check length assert_eq!(recent_blockhashes.len(), i); @@ -9644,7 +9774,7 @@ pub(crate) mod tests { let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); let recent_blockhashes = - from_account::(&bhq_account).unwrap(); + from_account::(&bhq_account).unwrap(); let sysvar_recent_blockhash = recent_blockhashes[0].blockhash; let bank_last_blockhash = bank.last_blockhash(); @@ -9935,7 +10065,7 @@ pub(crate) mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(100_000_000); let bank = Arc::new(Bank::new(&genesis_config)); let nonce = Keypair::new(); - let nonce_account = Account::new_data( + let nonce_account = AccountSharedData::new_data( 42_424_242, &nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), @@ -10188,9 +10318,9 @@ pub(crate) mod tests { let pubkey0 = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); let program_id = Pubkey::new(&[2; 32]); - let keypair_account = Account::new(8, 0, &program_id); - let account0 = Account::new(11, 0, &program_id); - let program_account = Account::new(1, 10, &Pubkey::default()); + let keypair_account = AccountSharedData::new(8, 0, &program_id); + let account0 = AccountSharedData::new(11, 0, &program_id); + let program_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank0.store_account(&keypair.pubkey(), &keypair_account); bank0.store_account(&pubkey0, &account0); 
bank0.store_account(&program_id, &program_account); @@ -10239,9 +10369,9 @@ pub(crate) mod tests { let pubkey0 = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); - let keypair0_account = Account::new(8, 0, &Pubkey::default()); - let keypair1_account = Account::new(9, 0, &Pubkey::default()); - let account0 = Account::new(11, 0, &&Pubkey::default()); + let keypair0_account = AccountSharedData::new(8, 0, &Pubkey::default()); + let keypair1_account = AccountSharedData::new(9, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(11, 0, &&Pubkey::default()); bank0.store_account(&keypair0.pubkey(), &keypair0_account); bank0.store_account(&keypair1.pubkey(), &keypair1_account); bank0.store_account(&pubkey0, &account0); @@ -10316,8 +10446,8 @@ pub(crate) mod tests { let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); let dup_pubkey = from_pubkey; - let from_account = Account::new(100, 1, &mock_program_id); - let to_account = Account::new(0, 1, &mock_program_id); + let from_account = AccountSharedData::new(100, 1, &mock_program_id); + let to_account = AccountSharedData::new(0, 1, &mock_program_id); bank.store_account(&from_pubkey, &from_account); bank.store_account(&to_pubkey, &to_account); @@ -10326,7 +10456,7 @@ pub(crate) mod tests { AccountMeta::new(to_pubkey, false), AccountMeta::new(dup_pubkey, false), ]; - let instruction = Instruction::new(mock_program_id, &10, account_metas); + let instruction = Instruction::new_with_bincode(mock_program_id, &10, account_metas); let tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10361,8 +10491,8 @@ pub(crate) mod tests { let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); let dup_pubkey = from_pubkey; - let from_account = Account::new(100, 1, &mock_program_id); - let to_account = Account::new(0, 1, 
&mock_program_id); + let from_account = AccountSharedData::new(100, 1, &mock_program_id); + let to_account = AccountSharedData::new(0, 1, &mock_program_id); bank.store_account(&from_pubkey, &from_account); bank.store_account(&to_pubkey, &to_account); @@ -10372,7 +10502,7 @@ pub(crate) mod tests { AccountMeta::new(dup_pubkey, false), AccountMeta::new(mock_program_id, false), ]; - let instruction = Instruction::new(mock_program_id, &10, account_metas); + let instruction = Instruction::new_with_bincode(mock_program_id, &10, account_metas); let tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10398,7 +10528,8 @@ pub(crate) mod tests { AccountMeta::new(to_pubkey, false), ]; - let instruction = Instruction::new(solana_vote_program::id(), &10, account_metas); + let instruction = + Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10468,7 +10599,8 @@ pub(crate) mod tests { mock_ok_vote_processor, ); - let instruction = Instruction::new(solana_vote_program::id(), &10, account_metas); + let instruction = + Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10501,7 +10633,8 @@ pub(crate) mod tests { mock_ok_vote_processor, ); - let instruction = Instruction::new(solana_vote_program::id(), &10, account_metas); + let instruction = + Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10558,7 +10691,8 @@ pub(crate) mod tests { mock_ok_vote_processor, ); - let instruction = Instruction::new(solana_vote_program::id(), &10, account_metas); + let instruction = + Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); let mut tx = 
Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -10605,7 +10739,7 @@ pub(crate) mod tests { }; let space = thread_rng().gen_range(0, 10); let owner = Pubkey::default(); - let account = Account::new(lamports, space, &owner); + let account = AccountSharedData::new(lamports, space, &owner); bank.store_account(&key, &account); lamports } else { @@ -10721,7 +10855,7 @@ pub(crate) mod tests { for (key, name) in &program_keys { let account = bank.get_account(key).unwrap(); assert!(account.executable); - assert_eq!(account.data, *name); + assert_eq!(account.data(), name); } info!("result: {:?}", result); let result_key = format!("{:?}", result); @@ -10737,7 +10871,7 @@ pub(crate) mod tests { let mut genesis_config = GenesisConfig::new( &[( Pubkey::new(&[42; 32]), - Account::new(1_000_000_000_000, 0, &system_program::id()), + AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()), )], &[], ); @@ -10762,19 +10896,19 @@ pub(crate) mod tests { if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "7x879deKLA5jAVpFTTV7w8wThuePdi1vzHwd5M3tgF83" + "7AkMgAb2v4tuoiSf3NnVgaBxSvp7XidbrSwsPEn4ENTp" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "2hDcZFBGCyXbBshR9VfcvFUZpXu3noDiW3L5X2oFX93E" + "2JzWWRBtQgdXboaACBRXNNKsHeBtn57uYmqH1AgGUkdG" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "6o6RvvLmmF2xQgX8THLyYGz9S11xpmoVvZmFBXeN1bk8" + "FQnVhDVjhCyfBxFb3bdm3CLiuCePvWuW5TGDsLBZnKAo" ); break; } @@ -10805,11 +10939,11 @@ pub(crate) mod tests { // Add a new program owned by the first let program2_pubkey = solana_sdk::pubkey::new_rand(); - let mut program2_account = Account::new(42, 1, &program1_pubkey); + let mut program2_account = AccountSharedData::new(42, 1, &program1_pubkey); program2_account.executable = true; bank.store_account(&program2_pubkey, &program2_account); - let instruction = Instruction::new(program2_pubkey, &10, vec![]); + let instruction = 
Instruction::new_with_bincode(program2_pubkey, &10, vec![]); let tx = Transaction::new_signed_with_payer( &[instruction.clone(), instruction], Some(&mint_keypair.pubkey()), @@ -10828,7 +10962,7 @@ pub(crate) mod tests { // of the storage for this slot let mut bank0 = Arc::new(Bank::new_with_config( &genesis_config, - HashSet::new(), + AccountSecondaryIndexes::default(), false, )); bank0.restore_old_behavior_for_fragile_tests(); @@ -10859,12 +10993,16 @@ pub(crate) mod tests { let pubkey2 = solana_sdk::pubkey::new_rand(); // Set root for bank 0, with caching enabled - let mut bank0 = Arc::new(Bank::new_with_config(&genesis_config, HashSet::new(), true)); + let mut bank0 = Arc::new(Bank::new_with_config( + &genesis_config, + AccountSecondaryIndexes::default(), + true, + )); bank0.restore_old_behavior_for_fragile_tests(); let pubkey0_size = get_shrink_account_size(); - let account0 = Account::new(1000, pubkey0_size as usize, &Pubkey::new_unique()); + let account0 = AccountSharedData::new(1000, pubkey0_size as usize, &Pubkey::new_unique()); bank0.store_account(&pubkey0, &account0); goto_end_of_slot(Arc::::get_mut(&mut bank0).unwrap()); @@ -10916,7 +11054,7 @@ pub(crate) mod tests { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![11, 1, 7]); + assert_eq!(alive_counts, vec![9, 1, 7]); } #[test] @@ -10964,7 +11102,7 @@ pub(crate) mod tests { .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account)) .sum(); // consumed_budgets represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(consumed_budgets, 12); + assert_eq!(consumed_budgets, 10); } #[test] @@ -11357,7 +11495,6 @@ pub(crate) mod tests { bank.get_balance(&inline_spl_token_v2_0::native_mint::id()), 0 ); - bank.deposit(&inline_spl_token_v2_0::native_mint::id(), 4200000000); // schedule activation of velas_hardfork_pack which contain 
spl token reconfigure patch @@ -11397,56 +11534,6 @@ pub(crate) mod tests { assert_eq!(native_mint_account.owner, inline_spl_token_v2_0::id()); } - #[test] - fn test_ensure_no_storage_rewards_pool() { - solana_logger::setup(); - - let mut genesis_config = - create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config; - - // Testnet - Storage rewards pool is purged at epoch 93 - // Also this is with bad capitalization - genesis_config.cluster_type = ClusterType::Testnet; - genesis_config.inflation = Inflation::default(); - let reward_pubkey = solana_sdk::pubkey::new_rand(); - genesis_config.rewards_pools.insert( - reward_pubkey, - Account::new(u64::MAX, 0, &solana_sdk::pubkey::new_rand()), - ); - let bank0 = Bank::new(&genesis_config); - // because capitalization has been reset with bogus capitalization calculation allowing overflows, - // deliberately substract 1 lamport to simulate it - bank0.capitalization.fetch_sub(1, Relaxed); - let bank0 = Arc::new(bank0); - assert_eq!(bank0.get_balance(&reward_pubkey), u64::MAX,); - - let bank1 = Bank::new_from_parent( - &bank0, - &Pubkey::default(), - genesis_config.epoch_schedule.get_first_slot_in_epoch(93), - ); - - // assert that everything gets in order.... - assert!(bank1.get_account(&reward_pubkey).is_none()); - let sysvar_and_native_proram_delta = 1; - assert_eq!( - bank0.capitalization() + 1 + 1_000_000_000 + sysvar_and_native_proram_delta, - bank1.capitalization() - ); - assert_eq!(bank1.capitalization(), bank1.calculate_capitalization()); - - // Depending on RUSTFLAGS, this test exposes rust's checked math behavior or not... 
- // So do some convolted setup; anyway this test itself will just be temporary - let bank0 = std::panic::AssertUnwindSafe(bank0); - let overflowing_capitalization = - std::panic::catch_unwind(|| bank0.calculate_capitalization()); - if let Ok(overflowing_capitalization) = overflowing_capitalization { - info!("asserting overflowing capitalization for bank0"); - assert_eq!(overflowing_capitalization, bank0.capitalization()); - } else { - info!("NOT-asserting overflowing capitalization for bank0"); - } - } #[derive(Debug)] struct TestExecutor {} impl Executor for TestExecutor { @@ -11523,8 +11610,11 @@ pub(crate) mod tests { }; let loaders = &[ - vec![(key3, Account::default()), (key4, Account::default())], - vec![(key1, Account::default())], + vec![ + (key3, AccountSharedData::default()), + (key4, AccountSharedData::default()), + ], + vec![(key1, AccountSharedData::default())], ]; // don't do any work if not dirty @@ -11586,7 +11676,10 @@ pub(crate) mod tests { let key2 = solana_sdk::pubkey::new_rand(); let executor: Arc = Arc::new(TestExecutor {}); - let loaders = &[vec![(key1, Account::default()), (key2, Account::default())]]; + let loaders = &[vec![ + (key1, AccountSharedData::default()), + (key2, AccountSharedData::default()), + ]]; // add one to root bank let mut executors = Executors::default(); @@ -11685,20 +11778,18 @@ pub(crate) mod tests { // Setup original token account bank.store_account_and_update_capitalization( &inline_spl_token_v2_0::id(), - &Account { + &AccountSharedData::from(Account { lamports: 100, ..Account::default() - }, + }), ); assert_eq!(bank.get_balance(&inline_spl_token_v2_0::id()), 100); // Setup new token account - let new_token_account = Account { + let new_token_account = AccountSharedData::from(Account { lamports: 123, - data: vec![1, 2, 3], - executable: true, ..Account::default() - }; + }); bank.store_account_and_update_capitalization( &inline_spl_token_v2_0::new_token_program::id(), &new_token_account, @@ -11903,7 +11994,7 @@ 
pub(crate) mod tests { update_vote_account_timestamp( BlockTimestamp { slot: bank.slot(), - timestamp: recent_timestamp + additional_secs, + timestamp: bank.unix_timestamp_from_genesis() - 1, }, &bank, &voting_keypair.pubkey(), @@ -11980,7 +12071,11 @@ pub(crate) mod tests { let bank = Bank::new(&genesis_config); let tx = Transaction::new_signed_with_payer( - &[Instruction::new(native_loader::id(), &(), vec![])], + &[Instruction::new_with_bincode( + native_loader::id(), + &(), + vec![], + )], Some(&mint_keypair.pubkey()), &[&mint_keypair], bank.last_blockhash(), @@ -12009,7 +12104,7 @@ pub(crate) mod tests { 0, &native_loader::id(), ), - Instruction::new( + Instruction::new_with_bincode( native_loader::id(), &(), vec![AccountMeta::new(to_keypair.pubkey(), false)], @@ -12036,7 +12131,7 @@ pub(crate) mod tests { 100, &native_loader::id(), ), - Instruction::new( + Instruction::new_with_bincode( native_loader::id(), &(), vec![AccountMeta::new(to_keypair.pubkey(), false)], @@ -12079,7 +12174,7 @@ pub(crate) mod tests { genesis_config.rent = Rent::free(); let bank0 = Arc::new(Bank::new_with_config( &genesis_config, - HashSet::new(), + AccountSecondaryIndexes::default(), accounts_db_caching_enabled, )); @@ -12091,7 +12186,7 @@ pub(crate) mod tests { .collect(); let program_id = system_program::id(); let starting_lamports = 1; - let starting_account = Account::new(starting_lamports, 0, &program_id); + let starting_account = AccountSharedData::new(starting_lamports, 0, &program_id); // Write accounts to the store for key in &all_pubkeys { @@ -12196,7 +12291,7 @@ pub(crate) mod tests { &solana_sdk::pubkey::new_rand(), current_minor_fork_bank.slot() + 2, )); - let account = Account::new(lamports, 0, &program_id); + let account = AccountSharedData::new(lamports, 0, &program_id); // Write partial updates to each of the banks in the minor fork so if any of them // get cleaned up, there will be keys with the wrong account value/missing. 
for key in pubkeys_to_modify { @@ -12222,7 +12317,7 @@ pub(crate) mod tests { current_minor_fork_bank.slot() - 1, )); let lamports = current_major_fork_bank.slot() + starting_lamports + 1; - let account = Account::new(lamports, 0, &program_id); + let account = AccountSharedData::new(lamports, 0, &program_id); for key in pubkeys_to_modify.iter() { // Store rooted updates to these pubkeys such that the minor // fork updates to the same keys will be deleted by clean @@ -12268,7 +12363,7 @@ pub(crate) mod tests { let mut prev_bank = bank0; loop { let lamports_this_round = current_bank.slot() + starting_lamports + 1; - let account = Account::new(lamports_this_round, 0, &program_id); + let account = AccountSharedData::new(lamports_this_round, 0, &program_id); for key in pubkeys_to_modify.iter() { current_bank.store_account(key, &account); } @@ -12322,7 +12417,7 @@ pub(crate) mod tests { } #[test] - fn test_get_inflation_start_slot() { + fn test_get_inflation_start_slot_devnet_testnet() { let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42); @@ -12335,8 +12430,8 @@ pub(crate) mod tests { .remove(&feature_set::full_inflation::devnet_and_testnet_velas_mainnet::id()) .unwrap(); for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() { - let _ = genesis_config.accounts.remove(&pair.vote_id); - let _ = genesis_config.accounts.remove(&pair.enable_id); + genesis_config.accounts.remove(&pair.vote_id).unwrap(); + genesis_config.accounts.remove(&pair.enable_id).unwrap(); } let bank = Bank::new(&genesis_config); @@ -12402,6 +12497,91 @@ pub(crate) mod tests { assert_eq!(bank.get_inflation_start_slot(), 2); } + #[test] + fn test_get_inflation_start_slot_mainnet() { + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42); + genesis_config + .accounts + .remove(&feature_set::pico_inflation::id()) + .unwrap(); + genesis_config + .accounts + .remove(&feature_set::full_inflation::devnet_and_testnet_velas_mainnet::id()) + .unwrap(); + for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() { + genesis_config.accounts.remove(&pair.vote_id).unwrap(); + genesis_config.accounts.remove(&pair.enable_id).unwrap(); + } + + let bank = Bank::new(&genesis_config); + + // Advance slot + let mut bank = new_from_parent(&Arc::new(bank)); + bank = new_from_parent(&Arc::new(bank)); + assert_eq!(bank.get_inflation_start_slot(), 0); + assert_eq!(bank.slot(), 2); + + // Request `pico_inflation` activation + bank.store_account( + &feature_set::pico_inflation::id(), + &feature::create_account( + &Feature { + activated_at: Some(1), + }, + 42, + ), + ); + bank.compute_active_feature_set(true); + assert_eq!(bank.get_inflation_start_slot(), 1); + + // Advance slot + bank = new_from_parent(&Arc::new(bank)); + assert_eq!(bank.slot(), 3); + + // Request `full_inflation::mainnet::certusone` activation, + // which takes priority over pico_inflation + bank.store_account( + &feature_set::full_inflation::mainnet::certusone::vote::id(), + &feature::create_account( + &Feature { + activated_at: Some(2), + }, + 42, + ), + ); + bank.store_account( + &feature_set::full_inflation::mainnet::certusone::enable::id(), + &feature::create_account( + &Feature { + activated_at: Some(2), + }, + 42, + ), + ); + bank.compute_active_feature_set(true); + assert_eq!(bank.get_inflation_start_slot(), 2); + + // Advance slot + bank = new_from_parent(&Arc::new(bank)); + assert_eq!(bank.slot(), 4); + + // Request `full_inflation::devnet_and_testnet_velas_mainnet` activation, + // which should have no effect on `get_inflation_start_slot` + bank.store_account( + &feature_set::full_inflation::devnet_and_testnet_velas_mainnet::id(), + &feature::create_account( + 
&Feature { + activated_at: Some(bank.slot()), + }, + 42, + ), + ); + bank.compute_active_feature_set(true); + assert_eq!(bank.get_inflation_start_slot(), 2); + } + #[test] fn test_get_inflation_num_slots_with_activations() { let GenesisConfigInfo { @@ -12418,8 +12598,8 @@ pub(crate) mod tests { .remove(&feature_set::full_inflation::devnet_and_testnet_velas_mainnet::id()) .unwrap(); for pair in feature_set::FULL_INFLATION_FEATURE_PAIRS.iter() { - let _ = genesis_config.accounts.remove(&pair.vote_id); - let _ = genesis_config.accounts.remove(&pair.enable_id); + genesis_config.accounts.remove(&pair.vote_id).unwrap(); + genesis_config.accounts.remove(&pair.enable_id).unwrap(); } let mut bank = Bank::new(&genesis_config); @@ -12551,9 +12731,9 @@ pub(crate) mod tests { mint_keypair, .. } = create_genesis_config_with_leader( - 1_000_000_000_000_000 + 2 * min_stake, + 1_000_000_000_000_000, &Pubkey::new_unique(), - min_stake, + bootstrap_validator_stake_lamports(), ); let bank = Arc::new(Bank::new(&genesis_config)); @@ -12582,7 +12762,7 @@ pub(crate) mod tests { &vote_keypair.pubkey(), &Authorized::auto(&mint_keypair.pubkey()), &Lockup::default(), - min_stake, + 1_000_000_000_000, ) .into_iter(), ); @@ -12707,15 +12887,15 @@ pub(crate) mod tests { .collect(); // Initialize accounts; all have larger SOL balances than current Bank built-ins - let account0 = Account::new(pubkeys_balances[0].1, 0, &Pubkey::default()); + let account0 = AccountSharedData::new(pubkeys_balances[0].1, 0, &Pubkey::default()); bank.store_account(&pubkeys_balances[0].0, &account0); - let account1 = Account::new(pubkeys_balances[1].1, 0, &Pubkey::default()); + let account1 = AccountSharedData::new(pubkeys_balances[1].1, 0, &Pubkey::default()); bank.store_account(&pubkeys_balances[1].0, &account1); - let account2 = Account::new(pubkeys_balances[2].1, 0, &Pubkey::default()); + let account2 = AccountSharedData::new(pubkeys_balances[2].1, 0, &Pubkey::default()); bank.store_account(&pubkeys_balances[2].0, 
&account2); - let account3 = Account::new(pubkeys_balances[3].1, 0, &Pubkey::default()); + let account3 = AccountSharedData::new(pubkeys_balances[3].1, 0, &Pubkey::default()); bank.store_account(&pubkeys_balances[3].0, &account3); - let account4 = Account::new(pubkeys_balances[4].1, 0, &Pubkey::default()); + let account4 = AccountSharedData::new(pubkeys_balances[4].1, 0, &Pubkey::default()); bank.store_account(&pubkeys_balances[4].0, &account4); // Create HashSet to exclude an account @@ -12785,4 +12965,93 @@ pub(crate) mod tests { vec![pubkeys_balances[3], pubkeys_balances[1]] ); } + + #[test] + fn test_transfer_sysvar() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config_with_leader( + 1_000_000_000_000_000, + &Pubkey::new_unique(), + bootstrap_validator_stake_lamports(), + ); + let mut bank = Bank::new(&genesis_config); + + fn mock_ix_processor( + _pubkey: &Pubkey, + ka: &[KeyedAccount], + _data: &[u8], + _invoke_context: &mut dyn InvokeContext, + ) -> std::result::Result<(), InstructionError> { + use solana_sdk::account::WritableAccount; + let mut data = ka[1].try_account_ref_mut()?; + data.data_as_mut_slice()[0] = 5; + Ok(()) + } + + let program_id = solana_sdk::pubkey::new_rand(); + bank.add_builtin("mock_program1", program_id, mock_ix_processor); + + let blockhash = bank.last_blockhash(); + let blockhash_sysvar = sysvar::recent_blockhashes::id(); + let orig_lamports = bank + .get_account(&sysvar::recent_blockhashes::id()) + .unwrap() + .lamports; + info!("{:?}", bank.get_account(&sysvar::recent_blockhashes::id())); + let tx = system_transaction::transfer(&mint_keypair, &blockhash_sysvar, 10, blockhash); + assert_eq!( + bank.process_transaction(&tx), + Err(TransactionError::InstructionError( + 0, + InstructionError::ReadonlyLamportChange + )) + ); + assert_eq!( + bank.get_account(&sysvar::recent_blockhashes::id()) + .unwrap() + .lamports, + orig_lamports + ); + info!("{:?}", 
bank.get_account(&sysvar::recent_blockhashes::id())); + + let accounts = vec![ + AccountMeta::new(mint_keypair.pubkey(), true), + AccountMeta::new(blockhash_sysvar, false), + ]; + let ix = Instruction::new_with_bincode(program_id, &0, accounts); + let message = Message::new(&[ix], Some(&mint_keypair.pubkey())); + let tx = Transaction::new(&[&mint_keypair], message, blockhash); + assert_eq!( + bank.process_transaction(&tx), + Err(TransactionError::InstructionError( + 0, + InstructionError::ReadonlyDataModified + )) + ); + } + + #[test] + fn test_rent_debits() { + let mut rent_debits = RentDebits::default(); + + // No entry for 0 rewards + rent_debits.push(&Pubkey::default(), 0, 0); + assert_eq!(rent_debits.0.len(), 0); + + // Doesn't fit an `i64`, no entry. (we'll die elsewhere) + rent_debits.push(&Pubkey::default(), u64::MAX, 0); + assert_eq!(rent_debits.0.len(), 0); + + // Since we're casting from `u64` the `i64::checked_neg()` is infallible + + // Some that actually work + rent_debits.push(&Pubkey::default(), 1, 0); + assert_eq!(rent_debits.0.len(), 1); + rent_debits.push(&Pubkey::default(), i64::MAX as u64, 0); + assert_eq!(rent_debits.0.len(), 2); + } } diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index a65e8ef3bf..111cbad6f1 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -127,7 +127,7 @@ impl SyncClient for BankClient { } fn get_account(&self, pubkey: &Pubkey) -> Result> { - Ok(self.bank.get_account(pubkey)) + Ok(self.bank.get_account(pubkey).map(Account::from)) } fn get_account_with_commitment( @@ -135,7 +135,7 @@ impl SyncClient for BankClient { pubkey: &Pubkey, _commitment_config: CommitmentConfig, ) -> Result> { - Ok(self.bank.get_account(pubkey)) + Ok(self.bank.get_account(pubkey).map(Account::from)) } fn get_balance(&self, pubkey: &Pubkey) -> Result { diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index fd950b21cf..fda46e11c9 100644 --- a/runtime/src/bank_forks.rs +++ 
b/runtime/src/bank_forks.rs @@ -6,7 +6,7 @@ use crate::{ }; use log::*; use solana_metrics::inc_new_counter_info; -use solana_sdk::{clock::Slot, timing}; +use solana_sdk::{clock::Slot, hash::Hash, timing}; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, ops::Index, @@ -106,6 +106,17 @@ impl BankForks { self.banks.get(&bank_slot) } + pub fn get_with_checked_hash( + &self, + (bank_slot, expected_hash): (Slot, Hash), + ) -> Option<&Arc> { + let maybe_bank = self.banks.get(&bank_slot); + if let Some(bank) = maybe_bank { + assert_eq!(bank.hash(), expected_hash); + } + maybe_bank + } + pub fn root_bank(&self) -> Arc { self[self.root()].clone() } @@ -225,12 +236,6 @@ impl BankForks { let parents = root_bank.parents(); banks.extend(parents.iter()); for bank in banks.iter() { - // bank.evm_state - // .write() - // .expect("evm state was poisoned") - // .dump_all() - // .expect("internal evm state error"); - let bank_slot = bank.slot(); if bank.block_height() % self.accounts_hash_interval_slots == 0 && bank_slot > self.last_accounts_hash_slot diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 02d14f92c5..3185d4bdb3 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -116,13 +116,13 @@ impl EpochStakes { #[cfg(test)] pub(crate) mod tests { use super::*; - use solana_sdk::account::Account; + use solana_sdk::account::AccountSharedData; use solana_vote_program::vote_state::create_account_with_authorized; use std::iter; struct VoteAccountInfo { vote_account: Pubkey, - account: Account, + account: AccountSharedData, authorized_voter: Pubkey, } diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index ad610d532b..73b57d9492 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -1,5 +1,5 @@ use solana_sdk::{ - account::Account, + account::{Account, AccountSharedData}, feature::{self, Feature}, feature_set::FeatureSet, fee_calculator::FeeRateGovernor, @@ -122,8 +122,8 @@ 
pub fn create_genesis_config_with_vote_accounts_and_cluster_type( // Put newly created accounts into genesis genesis_config_info.genesis_config.accounts.extend(vec![ (node_pubkey, node_account), - (vote_pubkey, vote_account), - (stake_pubkey, stake_account), + (vote_pubkey, Account::from(vote_account)), + (stake_pubkey, Account::from(stake_account)), ]); } @@ -163,12 +163,12 @@ pub fn activate_velas_features_on_prod(genesis_config: &mut GenesisConfig) { for feature_id in (*solana_sdk::feature_set::FEATURE_NAMES_BEFORE_MAINNET).keys() { genesis_config.accounts.insert( *feature_id, - feature::create_account( + Account::from(feature::create_account( &Feature { activated_at: Some(0), }, std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1), - ), + )), ); } } @@ -178,12 +178,12 @@ pub fn activate_all_features(genesis_config: &mut GenesisConfig) { for feature_id in FeatureSet::default().inactive { genesis_config.accounts.insert( feature_id, - feature::create_account( + Account::from(feature::create_account( &Feature { activated_at: Some(0), }, std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1), - ), + )), ); } } @@ -200,7 +200,7 @@ pub fn create_genesis_config_with_leader_ex( fee_rate_governor: FeeRateGovernor, rent: Rent, cluster_type: ClusterType, - mut initial_accounts: Vec<(Pubkey, Account)>, + mut initial_accounts: Vec<(Pubkey, AccountSharedData)>, ) -> GenesisConfig { let validator_vote_account = vote_state::create_account( &validator_vote_account_pubkey, @@ -219,17 +219,28 @@ pub fn create_genesis_config_with_leader_ex( initial_accounts.push(( *mint_pubkey, - Account::new(mint_lamports, 0, &system_program::id()), + AccountSharedData::new(mint_lamports, 0, &system_program::id()), )); initial_accounts.push(( *validator_pubkey, - Account::new(validator_lamports, 0, &system_program::id()), + AccountSharedData::new(validator_lamports, 0, &system_program::id()), )); - initial_accounts.push((*validator_vote_account_pubkey, 
validator_vote_account)); - initial_accounts.push((*validator_stake_account_pubkey, validator_stake_account)); + initial_accounts.push(( + *validator_vote_account_pubkey, + AccountSharedData::from(validator_vote_account), + )); + initial_accounts.push(( + *validator_stake_account_pubkey, + AccountSharedData::from(validator_stake_account), + )); + + let accounts = initial_accounts + .into_iter() + .map(|(key, account)| (key, Account::from(account))) + .collect(); let mut genesis_config = GenesisConfig { - accounts: initial_accounts.iter().cloned().collect(), + accounts, fee_rate_governor, rent, cluster_type, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index f872627657..8f18d5cd39 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -1,20 +1,24 @@ -use bzip2::bufread::BzDecoder; -use log::*; -use solana_sdk::genesis_config::GenesisConfig; -use std::{ - fs::{self, File}, - io::{BufReader, Read}, - path::{ - Component::{CurDir, Normal}, - Path, +use { + bzip2::bufread::BzDecoder, + log::*, + rand::{thread_rng, Rng}, + solana_sdk::genesis_config::GenesisConfig, + std::{ + collections::HashMap, + fs::{self, File}, + io::{BufReader, Read}, + path::{ + Component::{CurDir, Normal}, + Path, PathBuf, + }, + time::Instant, }, - time::Instant, -}; -use tar::{ - Archive, - EntryType::{Directory, GNUSparse, Regular}, + tar::{ + Archive, + EntryType::{Directory, GNUSparse, Regular}, + }, + thiserror::Error, }; -use thiserror::Error; #[derive(Error, Debug)] pub enum UnpackError { @@ -79,16 +83,15 @@ fn check_unpack_result(unpack_result: bool, path: String) -> Result<()> { Ok(()) } -fn unpack_archive, C>( +fn unpack_archive<'a, A: Read, C>( archive: &mut Archive, - unpack_dir: P, apparent_limit_size: u64, actual_limit_size: u64, limit_count: u64, - entry_checker: C, + mut entry_checker: C, ) -> Result<()> where - C: Fn(&[&str], tar::EntryType) -> bool, + C: FnMut(&[&str], tar::EntryType) -> Option<&'a Path>, { let mut 
apparent_total_size: u64 = 0; let mut actual_total_size: u64 = 0; @@ -110,7 +113,14 @@ where Normal(c) => c.to_str(), _ => None, // Prefix (for Windows) and RootDir are forbidden }); - if parts.clone().any(|p| p.is_none()) { + + // Reject old-style BSD directory entries that aren't explicitly tagged as directories + let legacy_dir_entry = + entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/"); + let kind = entry.header().entry_type(); + let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory); + + if parts.clone().any(|p| p.is_none()) || reject_legacy_dir_entry { return Err(UnpackError::Archive(format!( "invalid path found: {:?}", path_str @@ -118,13 +128,17 @@ where } let parts: Vec<_> = parts.map(|p| p.unwrap()).collect(); - if !entry_checker(parts.as_slice(), entry.header().entry_type()) { - return Err(UnpackError::Archive(format!( - "extra entry found: {:?} {:?}", - path_str, - entry.header().entry_type(), - ))); - } + let unpack_dir = match entry_checker(parts.as_slice(), kind) { + None => { + return Err(UnpackError::Archive(format!( + "extra entry found: {:?} {:?}", + path_str, + entry.header().entry_type(), + ))); + } + Some(unpack_dir) => unpack_dir, + }; + apparent_total_size = checked_total_size_sum( apparent_total_size, entry.header().size()?, @@ -139,7 +153,15 @@ where // unpack_in does its own sanitization // ref: https://docs.rs/tar/*/tar/struct.Entry.html#method.unpack_in - check_unpack_result(entry.unpack_in(&unpack_dir)?, path_str)?; + check_unpack_result(entry.unpack_in(unpack_dir)?, path_str)?; + + // Sanitize permissions. 
+ let mode = match entry.header().entry_type() { + GNUSparse | Regular => 0o644, + _ => 0o755, + }; + set_perms(&unpack_dir.join(entry.path()?), mode)?; + total_entries += 1; let now = Instant::now(); if now.duration_since(last_log_update).as_secs() >= 10 { @@ -149,21 +171,59 @@ where } info!("unpacked {} entries total", total_entries); - Ok(()) + return Ok(()); + + #[cfg(unix)] + fn set_perms(dst: &Path, mode: u32) -> std::io::Result<()> { + use std::os::unix::fs::PermissionsExt; + + let perm = fs::Permissions::from_mode(mode as _); + fs::set_permissions(dst, perm) + } + + #[cfg(windows)] + fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { + let mut perm = fs::metadata(dst)?.permissions(); + perm.set_readonly(false); + fs::set_permissions(dst, perm) + } } -pub fn unpack_snapshot>( +// Map from AppendVec file name to unpacked file system location +pub type UnpackedAppendVecMap = HashMap; + +pub fn unpack_snapshot( archive: &mut Archive, - unpack_dir: P, -) -> Result<()> { + ledger_dir: &Path, + account_paths: &[PathBuf], +) -> Result { + assert!(!account_paths.is_empty()); + let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); + unpack_archive( archive, - unpack_dir, MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE, MAX_SNAPSHOT_ARCHIVE_UNPACKED_ACTUAL_SIZE, MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT, - is_valid_snapshot_archive_entry, + |parts, kind| { + if is_valid_snapshot_archive_entry(parts, kind) { + if let ["accounts", file] = parts { + // Randomly distribute the accounts files about the available `account_paths`, + let path_index = thread_rng().gen_range(0, account_paths.len()); + account_paths.get(path_index).map(|path_buf| { + unpacked_append_vec_map + .insert(file.to_string(), path_buf.join("accounts").join(file)); + path_buf.as_path() + }) + } else { + Some(ledger_dir) + } + } else { + None + } + }, ) + .map(|_| unpacked_append_vec_map) } fn all_digits(v: &str) -> bool { @@ -266,18 +326,23 @@ pub fn unpack_genesis_archive( Ok(()) } -fn 
unpack_genesis>( +fn unpack_genesis( archive: &mut Archive, - unpack_dir: P, + unpack_dir: &Path, max_genesis_archive_unpacked_size: u64, ) -> Result<()> { unpack_archive( archive, - unpack_dir, max_genesis_archive_unpacked_size, max_genesis_archive_unpacked_size, MAX_GENESIS_ARCHIVE_UNPACKED_COUNT, - is_valid_genesis_archive_entry, + |p, k| { + if is_valid_genesis_archive_entry(p, k) { + Some(unpack_dir) + } else { + None + } + }, ) } @@ -422,6 +487,10 @@ mod tests { &["genesis.bin"], tar::EntryType::Regular )); + assert!(is_valid_genesis_archive_entry( + &["genesis.bin"], + tar::EntryType::GNUSparse, + )); assert!(is_valid_genesis_archive_entry( &["rocksdb"], tar::EntryType::Directory @@ -431,14 +500,42 @@ mod tests { tar::EntryType::Regular )); assert!(is_valid_genesis_archive_entry( - &["rocksdb", "foo", "bar"], - tar::EntryType::Regular + &["rocksdb", "foo"], + tar::EntryType::GNUSparse, )); assert!(!is_valid_genesis_archive_entry( &["aaaa"], tar::EntryType::Regular )); + assert!(!is_valid_genesis_archive_entry( + &["aaaa"], + tar::EntryType::GNUSparse, + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb"], + tar::EntryType::Regular + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb"], + tar::EntryType::GNUSparse, + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb", "foo"], + tar::EntryType::Directory, + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb", "foo", "bar"], + tar::EntryType::Directory, + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb", "foo", "bar"], + tar::EntryType::Regular + )); + assert!(!is_valid_genesis_archive_entry( + &["rocksdb", "foo", "bar"], + tar::EntryType::GNUSparse + )); } fn with_finalize_and_unpack(archive: tar::Builder>, checker: C) -> Result<()> @@ -450,11 +547,17 @@ mod tests { let mut archive: Archive> = Archive::new(reader); let temp_dir = tempfile::TempDir::new().unwrap(); - checker(&mut archive, &temp_dir.into_path()) + checker(&mut archive, temp_dir.path())?; + // 
Check that there is no bad permissions preventing deletion. + let result = temp_dir.close(); + assert_matches!(result, Ok(())); + Ok(()) } fn finalize_and_unpack_snapshot(archive: tar::Builder>) -> Result<()> { - with_finalize_and_unpack(archive, |a, b| unpack_snapshot(a, b)) + with_finalize_and_unpack(archive, |a, b| { + unpack_snapshot(a, b, &[PathBuf::new()]).map(|_| ()) + }) } fn finalize_and_unpack_genesis(archive: tar::Builder>) -> Result<()> { @@ -495,6 +598,65 @@ mod tests { assert_matches!(result, Ok(())); } + #[test] + fn test_archive_unpack_genesis_bad_perms() { + let mut archive = Builder::new(Vec::new()); + + let mut header = Header::new_gnu(); + header.set_path("rocksdb").unwrap(); + header.set_entry_type(Directory); + header.set_size(0); + header.set_cksum(); + let data: &[u8] = &[]; + archive.append(&header, data).unwrap(); + + let mut header = Header::new_gnu(); + header.set_path("rocksdb/test").unwrap(); + header.set_size(4); + header.set_cksum(); + let data: &[u8] = &[1, 2, 3, 4]; + archive.append(&header, data).unwrap(); + + // Removing all permissions makes it harder to delete this directory + // or work with files inside it. + let mut header = Header::new_gnu(); + header.set_path("rocksdb").unwrap(); + header.set_entry_type(Directory); + header.set_mode(0o000); + header.set_size(0); + header.set_cksum(); + let data: &[u8] = &[]; + archive.append(&header, data).unwrap(); + + let result = finalize_and_unpack_genesis(archive); + assert_matches!(result, Ok(())); + } + + #[test] + fn test_archive_unpack_genesis_bad_rocksdb_subdir() { + let mut archive = Builder::new(Vec::new()); + + let mut header = Header::new_gnu(); + header.set_path("rocksdb").unwrap(); + header.set_entry_type(Directory); + header.set_size(0); + header.set_cksum(); + let data: &[u8] = &[]; + archive.append(&header, data).unwrap(); + + // tar-rs treats following entry as a Directory to support old tar formats. 
+ let mut header = Header::new_gnu(); + header.set_path("rocksdb/test/").unwrap(); + header.set_entry_type(Regular); + header.set_size(0); + header.set_cksum(); + let data: &[u8] = &[]; + archive.append(&header, data).unwrap(); + + let result = finalize_and_unpack_genesis(archive); + assert_matches!(result, Err(UnpackError::Archive(ref message)) if message == "invalid path found: \"rocksdb/test/\""); + } + #[test] fn test_archive_unpack_snapshot_invalid_path() { let mut header = Header::new_gnu(); diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fd7089aa8b..7c5c627904 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -4,6 +4,7 @@ pub mod accounts; pub mod accounts_background_service; pub mod accounts_cache; pub mod accounts_db; +pub mod accounts_hash; pub mod accounts_index; pub mod append_vec; pub mod bank; diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 4fa5343e8b..e2dfdd3979 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -199,5 +199,5 @@ pub fn create_invoke_instruction( data: &T, ) -> Instruction { let account_metas = vec![AccountMeta::new(from_pubkey, true)]; - Instruction::new(program_id, data, account_metas) + Instruction::new_with_bincode(program_id, data, account_metas) } diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 67c2247f33..9652075531 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -1,15 +1,18 @@ use crate::{ - instruction_recorder::InstructionRecorder, log_collector::LogCollector, - native_loader::NativeLoader, rent_collector::RentCollector, + accounts::Accounts, accounts_index::Ancestors, instruction_recorder::InstructionRecorder, + log_collector::LogCollector, native_loader::NativeLoader, rent_collector::RentCollector, }; use log::*; use serde::{Deserialize, Serialize}; use solana_evm_loader_program::EvmProcessor; use solana_sdk::{ - account::Account, + account::{AccountSharedData, 
ReadableAccount, WritableAccount}, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - feature_set::{instructions_sysvar_enabled, track_writable_deescalation, FeatureSet}, + feature_set::{ + cpi_share_ro_and_exec_accounts, demote_sysvar_write_locks, instructions_sysvar_enabled, + FeatureSet, + }, ic_msg, instruction::{CompiledInstruction, Instruction, InstructionError}, keyed_account::{create_keyed_readonly_accounts, KeyedAccount}, @@ -22,6 +25,7 @@ use solana_sdk::{ pubkey::Pubkey, rent::Rent, system_program, + sysvar::instructions, transaction::TransactionError, }; use std::{ @@ -53,47 +57,68 @@ impl Executors { } } +#[derive(Default, Debug)] +pub struct ExecuteDetailsTimings { + pub serialize_us: u64, + pub create_vm_us: u64, + pub execute_us: u64, + pub deserialize_us: u64, + pub changed_account_count: u64, + pub total_account_count: u64, + pub total_data_size: usize, + pub data_size_changed: usize, +} + +impl ExecuteDetailsTimings { + pub fn accumulate(&mut self, other: &ExecuteDetailsTimings) { + self.serialize_us += other.serialize_us; + self.create_vm_us += other.create_vm_us; + self.execute_us += other.execute_us; + self.deserialize_us += other.deserialize_us; + self.changed_account_count += other.changed_account_count; + self.total_account_count += other.total_account_count; + self.total_data_size += other.total_data_size; + self.data_size_changed += other.data_size_changed; + } +} + // The relevant state of an account before an Instruction executes, used // to verify account integrity after the Instruction completes #[derive(Clone, Debug, Default)] pub struct PreAccount { key: Pubkey, - is_writable: bool, - account: RefCell, + account: Rc>, + changed: bool, } impl PreAccount { - pub fn new(key: &Pubkey, account: &Account, is_writable: bool) -> Self { + pub fn new(key: &Pubkey, account: &AccountSharedData) -> Self { Self { key: *key, - is_writable, - account: RefCell::new(account.clone()), + account: 
Rc::new(RefCell::new(account.clone())), + changed: false, } } pub fn verify( &self, program_id: &Pubkey, - is_writable: Option, + is_writable: bool, rent: &Rent, - post: &Account, + post: &AccountSharedData, + timings: &mut ExecuteDetailsTimings, ) -> Result<(), InstructionError> { let pre = self.account.borrow(); - let is_writable = if let Some(is_writable) = is_writable { - is_writable - } else { - self.is_writable - }; - // Only the owner of the account may change owner and // only if the account is writable and // only if the account is not executable and // only if the data is zero-initialized or empty - if pre.owner != post.owner + let owner_changed = pre.owner != post.owner; + if owner_changed && (!is_writable // line coverage used to get branch coverage || pre.executable || *program_id != pre.owner - || !Self::is_zeroed(&post.data)) + || !Self::is_zeroed(&post.data())) { return Err(InstructionError::ModifiedProgramId); } @@ -106,7 +131,8 @@ impl PreAccount { } // The balance of read-only and executable accounts may not change - if pre.lamports != post.lamports { + let lamports_changed = pre.lamports != post.lamports; + if lamports_changed { if !is_writable { return Err(InstructionError::ReadonlyLamportChange); } @@ -117,7 +143,8 @@ impl PreAccount { // Only the system program can change the size of the data // and only if the system program owns the account - if pre.data.len() != post.data.len() + let data_len_changed = pre.data().len() != post.data().len(); + if data_len_changed && (!system_program::check_id(program_id) // line coverage used to get branch coverage || !system_program::check_id(&pre.owner)) { @@ -130,7 +157,7 @@ impl PreAccount { if !(*program_id == pre.owner && is_writable // line coverage used to get branch coverage && !pre.executable) - && pre.data != post.data + && pre.data() != post.data() { if pre.executable { return Err(InstructionError::ExecutableDataModified); @@ -142,8 +169,9 @@ impl PreAccount { } // executable is one-way 
(false->true) and only the account owner may set it. - if pre.executable != post.executable { - if !rent.is_exempt(post.lamports, post.data.len()) { + let executable_changed = pre.executable != post.executable; + if executable_changed { + if !rent.is_exempt(post.lamports, post.data().len()) { return Err(InstructionError::ExecutableAccountNotRentExempt); } if !is_writable // line coverage used to get branch coverage @@ -155,26 +183,42 @@ impl PreAccount { } // No one modifies rent_epoch (yet). - if pre.rent_epoch != post.rent_epoch { + let rent_epoch_changed = pre.rent_epoch != post.rent_epoch; + if rent_epoch_changed { return Err(InstructionError::RentEpochModified); } + timings.total_account_count += 1; + timings.total_data_size += post.data().len(); + if owner_changed + || lamports_changed + || data_len_changed + || executable_changed + || rent_epoch_changed + || self.changed + { + timings.changed_account_count += 1; + timings.data_size_changed += post.data().len(); + } + Ok(()) } - pub fn update(&mut self, account: &Account) { + pub fn update(&mut self, account: &AccountSharedData) { let mut pre = self.account.borrow_mut(); pre.lamports = account.lamports; pre.owner = account.owner; pre.executable = account.executable; - if pre.data.len() != account.data.len() { + if pre.data().len() != account.data().len() { // Only system account can change data size, copy with alloc - pre.data = account.data.clone(); + pre.set_data(account.data().clone()); } else { // Copy without allocate - pre.data.clone_from_slice(&account.data); + pre.data_as_mut_slice().clone_from_slice(&account.data()); } + + self.changed = true; } pub fn key(&self) -> Pubkey { @@ -215,7 +259,8 @@ pub struct ThisInvokeContext<'a> { program_ids: Vec, rent: Rent, pre_accounts: Vec, - account_deps: &'a [(Pubkey, RefCell)], + executables: &'a [(Pubkey, Rc>)], + account_deps: &'a [(Pubkey, Rc>)], programs: &'a [(Pubkey, ProcessInstructionWithContext)], logger: Rc>, bpf_compute_budget: BpfComputeBudget, @@ 
-223,6 +268,11 @@ pub struct ThisInvokeContext<'a> { executors: Rc>, instruction_recorder: Option, feature_set: Arc, + pub timings: ExecuteDetailsTimings, + account_db: Arc, + ancestors: &'a Ancestors, + #[allow(clippy::type_complexity)] + sysvars: RefCell>>)>>, } impl<'a> ThisInvokeContext<'a> { #[allow(clippy::too_many_arguments)] @@ -230,13 +280,16 @@ impl<'a> ThisInvokeContext<'a> { program_id: &Pubkey, rent: Rent, pre_accounts: Vec, - account_deps: &'a [(Pubkey, RefCell)], + executables: &'a [(Pubkey, Rc>)], + account_deps: &'a [(Pubkey, Rc>)], programs: &'a [(Pubkey, ProcessInstructionWithContext)], log_collector: Option>, bpf_compute_budget: BpfComputeBudget, executors: Rc>, instruction_recorder: Option, feature_set: Arc, + account_db: Arc, + ancestors: &'a Ancestors, ) -> Self { let mut program_ids = Vec::with_capacity(bpf_compute_budget.max_invoke_depth); program_ids.push(*program_id); @@ -244,6 +297,7 @@ impl<'a> ThisInvokeContext<'a> { program_ids, rent, pre_accounts, + executables, account_deps, programs, logger: Rc::new(RefCell::new(ThisLogger { log_collector })), @@ -254,6 +308,10 @@ impl<'a> ThisInvokeContext<'a> { executors, instruction_recorder, feature_set, + timings: ExecuteDetailsTimings::default(), + account_db, + ancestors, + sysvars: RefCell::new(vec![]), } } } @@ -279,11 +337,9 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> { &mut self, message: &Message, instruction: &CompiledInstruction, - accounts: &[Rc>], - caller_privileges: Option<&[bool]>, + accounts: &[Rc>], + caller_write_privileges: Option<&[bool]>, ) -> Result<(), InstructionError> { - let track_writable_deescalation = - self.is_feature_active(&track_writable_deescalation::id()); match self.program_ids.last() { Some(program_id) => MessageProcessor::verify_and_update( message, @@ -292,8 +348,9 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> { accounts, program_id, &self.rent, - track_writable_deescalation, - caller_privileges, + caller_write_privileges, + &mut 
self.timings, + self.feature_set.is_active(&demote_sysvar_write_locks::id()), ), None => Err(InstructionError::GenericError), // Should never happen } @@ -329,26 +386,70 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> { fn is_feature_active(&self, feature_id: &Pubkey) -> bool { self.feature_set.is_active(feature_id) } - fn get_account(&self, pubkey: &Pubkey) -> Option> { - if let Some(account) = self.pre_accounts.iter().find_map(|pre| { - if pre.key == *pubkey { - Some(pre.account.clone()) + fn get_account(&self, pubkey: &Pubkey) -> Option>> { + if self.is_feature_active(&cpi_share_ro_and_exec_accounts::id()) { + if let Some((_, account)) = self.executables.iter().find(|(key, _)| key == pubkey) { + Some(account.clone()) + } else if let Some((_, account)) = + self.account_deps.iter().find(|(key, _)| key == pubkey) + { + Some(account.clone()) } else { self.pre_accounts .iter() .find(|pre| pre.key == *pubkey) .map(|pre| pre.account.clone()) } - }) { - return Some(account); + } else { + if let Some(account) = self.pre_accounts.iter().find_map(|pre| { + if pre.key == *pubkey { + Some(pre.account.clone()) + } else { + None + } + }) { + return Some(account); + } + self.account_deps.iter().find_map(|(key, account)| { + if key == pubkey { + Some(account.clone()) + } else { + None + } + }) } - self.account_deps.iter().find_map(|(key, account)| { - if key == pubkey { - Some(account.clone()) - } else { - None + } + fn update_timing( + &mut self, + serialize_us: u64, + create_vm_us: u64, + execute_us: u64, + deserialize_us: u64, + ) { + self.timings.serialize_us += serialize_us; + self.timings.create_vm_us += create_vm_us; + self.timings.execute_us += execute_us; + self.timings.deserialize_us += deserialize_us; + } + fn get_sysvar_data(&self, id: &Pubkey) -> Option>> { + if let Ok(mut sysvars) = self.sysvars.try_borrow_mut() { + // Try share from cache + let mut result = sysvars + .iter() + .find_map(|(key, sysvar)| if id == key { sysvar.clone() } else { None }); + if 
result.is_none() { + // Load it + result = self + .account_db + .load_slow(self.ancestors, id) + .map(|(account, _)| Rc::new(account.data().clone())); + // Cache it + sysvars.push((*id, result.clone())); } - }) + result + } else { + None + } } } pub struct ThisLogger { @@ -466,8 +567,9 @@ impl MessageProcessor { fn create_keyed_accounts<'a>( message: &'a Message, instruction: &'a CompiledInstruction, - executable_accounts: &'a [(Pubkey, RefCell)], - accounts: &'a [Rc>], + executable_accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [Rc>], + demote_sysvar_write_locks: bool, ) -> Vec> { let mut keyed_accounts = create_keyed_readonly_accounts(&executable_accounts); let mut keyed_accounts2: Vec<_> = instruction @@ -478,7 +580,7 @@ impl MessageProcessor { let index = index as usize; let key = &message.account_keys[index]; let account = &accounts[index]; - if message.is_writable(index) { + if message.is_writable(index, demote_sysvar_write_locks) { KeyedAccount::new(key, is_signer, account) } else { KeyedAccount::new_readonly(key, is_signer, account) @@ -589,7 +691,7 @@ impl MessageProcessor { ) { ic_msg!( invoke_context, - "{}'s signer priviledge escalated", + "{}'s signer privilege escalated", account.pubkey ); return Err(InstructionError::PrivilegeEscalation); @@ -633,7 +735,14 @@ impl MessageProcessor { ) -> Result<(), InstructionError> { let invoke_context = RefCell::new(invoke_context); - let (message, executables, accounts, account_refs, caller_privileges) = { + let ( + message, + executables, + accounts, + account_refs, + caller_write_privileges, + demote_sysvar_write_locks, + ) = { let invoke_context = invoke_context.borrow(); let caller_program_id = invoke_context.get_caller()?; @@ -644,11 +753,11 @@ impl MessageProcessor { .iter() .map(|seeds| Pubkey::create_program_address(&seeds, caller_program_id)) .collect::, solana_sdk::pubkey::PubkeyError>>()?; - let mut caller_privileges = keyed_accounts + let mut caller_write_privileges = keyed_accounts .iter() 
.map(|keyed_account| keyed_account.is_writable()) .collect::>(); - caller_privileges.insert(0, false); + caller_write_privileges.insert(0, false); let (message, callee_program_id, _) = Self::create_message(&instruction, &keyed_accounts, &signers, &invoke_context)?; let mut accounts = vec![]; @@ -724,7 +833,8 @@ impl MessageProcessor { executables, accounts, account_refs, - caller_privileges, + caller_write_privileges, + invoke_context.is_feature_active(&demote_sysvar_write_locks::id()), ) }; @@ -733,7 +843,7 @@ impl MessageProcessor { &message, &executables, &accounts, - &caller_privileges, + &caller_write_privileges, *(&mut *(invoke_context.borrow_mut())), )?; @@ -743,10 +853,11 @@ impl MessageProcessor { let invoke_context = invoke_context.borrow(); for (i, (account, account_ref)) in accounts.iter().zip(account_refs).enumerate() { let account = account.borrow(); - if message.is_writable(i) && !account.executable { + if message.is_writable(i, demote_sysvar_write_locks) && !account.executable { account_ref.try_account_ref_mut()?.lamports = account.lamports; account_ref.try_account_ref_mut()?.owner = account.owner; - if account_ref.data_len()? != account.data.len() && account_ref.data_len()? != 0 + if account_ref.data_len()? != account.data().len() + && account_ref.data_len()? != 0 { // Only support for `CreateAccount` at this time. // Need a way to limit total realloc size across multiple CPI calls @@ -756,7 +867,9 @@ impl MessageProcessor { ); return Err(InstructionError::InvalidRealloc); } - account_ref.try_account_ref_mut()?.data = account.data.clone(); + account_ref + .try_account_ref_mut()? 
+ .set_data(account.data().clone()); } } } @@ -768,9 +881,9 @@ impl MessageProcessor { /// This method calls the instruction's program entrypoint function pub fn process_cross_program_instruction( message: &Message, - executable_accounts: &[(Pubkey, RefCell)], - accounts: &[Rc>], - caller_privileges: &[bool], + executable_accounts: &[(Pubkey, Rc>)], + accounts: &[Rc>], + caller_write_privileges: &[bool], invoke_context: &mut dyn InvokeContext, ) -> Result<(), InstructionError> { if let Some(instruction) = message.instructions.get(0) { @@ -781,12 +894,18 @@ impl MessageProcessor { message, instruction, accounts, - Some(caller_privileges), + Some(caller_write_privileges), )?; - + let demote_sysvar_write_locks = + invoke_context.is_feature_active(&demote_sysvar_write_locks::id()); // Construct keyed accounts - let keyed_accounts = - Self::create_keyed_accounts(message, instruction, executable_accounts, accounts); + let keyed_accounts = Self::create_keyed_accounts( + message, + instruction, + executable_accounts, + accounts, + demote_sysvar_write_locks, + ); // Invoke callee invoke_context.push(program_id)?; @@ -821,15 +940,14 @@ impl MessageProcessor { pub fn create_pre_accounts( message: &Message, instruction: &CompiledInstruction, - accounts: &[Rc>], + accounts: &[Rc>], ) -> Vec { - let mut pre_accounts = Vec::with_capacity(accounts.len()); + let mut pre_accounts = Vec::with_capacity(instruction.accounts.len()); { let mut work = |_unique_index: usize, account_index: usize| { let key = &message.account_keys[account_index]; - let is_writable = message.is_writable(account_index); let account = accounts[account_index].borrow(); - pre_accounts.push(PreAccount::new(key, &account, is_writable)); + pre_accounts.push(PreAccount::new(key, &account)); Ok(()) }; let _ = instruction.visit_each_account(&mut work); @@ -839,7 +957,7 @@ impl MessageProcessor { /// Verify there are no outstanding borrows pub fn verify_account_references( - accounts: &[(Pubkey, RefCell)], + accounts: 
&[(Pubkey, Rc>)], ) -> Result<(), InstructionError> { for (_, account) in accounts.iter() { account @@ -854,9 +972,11 @@ impl MessageProcessor { message: &Message, instruction: &CompiledInstruction, pre_accounts: &[PreAccount], - executable_accounts: &[(Pubkey, RefCell)], - accounts: &[Rc>], + executable_accounts: &[(Pubkey, Rc>)], + accounts: &[Rc>], rent: &Rent, + timings: &mut ExecuteDetailsTimings, + demote_sysvar_write_locks: bool, ) -> Result<(), InstructionError> { // Verify all executable accounts have zero outstanding refs Self::verify_account_references(executable_accounts)?; @@ -866,15 +986,19 @@ impl MessageProcessor { { let program_id = instruction.program_id(&message.account_keys); let mut work = |unique_index: usize, account_index: usize| { - // Verify account has no outstanding references and take one - let account = accounts[account_index] - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + { + // Verify account has no outstanding references + let _ = accounts[account_index] + .try_borrow_mut() + .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + } + let account = accounts[account_index].borrow(); pre_accounts[unique_index].verify( &program_id, - Some(message.is_writable(account_index)), + message.is_writable(account_index, demote_sysvar_write_locks), rent, &account, + timings, )?; pre_sum += u128::from(pre_accounts[unique_index].lamports()); post_sum += u128::from(account.lamports); @@ -895,11 +1019,12 @@ impl MessageProcessor { message: &Message, instruction: &CompiledInstruction, pre_accounts: &mut [PreAccount], - accounts: &[Rc>], + accounts: &[Rc>], program_id: &Pubkey, rent: &Rent, - track_writable_deescalation: bool, - caller_privileges: Option<&[bool]>, + caller_write_privileges: Option<&[bool]>, + timings: &mut ExecuteDetailsTimings, + demote_sysvar_write_locks: bool, ) -> Result<(), InstructionError> { // Verify the per-account instruction results let (mut pre_sum, mut post_sum) = (0_u128, 0_u128); 
@@ -907,29 +1032,27 @@ impl MessageProcessor { if account_index < message.account_keys.len() && account_index < accounts.len() { let key = &message.account_keys[account_index]; let account = &accounts[account_index]; - let is_writable = if track_writable_deescalation { - Some(if let Some(caller_privileges) = caller_privileges { - caller_privileges[account_index] - } else { - message.is_writable(account_index) - }) + let is_writable = if let Some(caller_write_privileges) = caller_write_privileges { + caller_write_privileges[account_index] } else { - None + message.is_writable(account_index, demote_sysvar_write_locks) }; // Find the matching PreAccount for pre_account in pre_accounts.iter_mut() { if *key == pre_account.key() { - // Verify account has no outstanding references and take one - let account = account - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - - pre_account.verify(&program_id, is_writable, &rent, &account)?; + { + // Verify account has no outstanding references + let _ = account + .try_borrow_mut() + .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + } + let account = account.borrow(); + pre_account.verify(&program_id, is_writable, &rent, &account, timings)?; pre_sum += u128::from(pre_account.lamports()); post_sum += u128::from(account.lamports); - - pre_account.update(&account); - + if is_writable && !account.executable { + pre_account.update(&account); + } return Ok(()); } } @@ -955,9 +1078,9 @@ impl MessageProcessor { &self, message: &Message, instruction: &CompiledInstruction, - executable_accounts: &[(Pubkey, RefCell)], - accounts: &[Rc>], - account_deps: &[(Pubkey, RefCell)], + executable_accounts: &[(Pubkey, Rc>)], + accounts: &[Rc>], + account_deps: &[(Pubkey, Rc>)], rent_collector: &RentCollector, log_collector: Option>, executors: Rc>, @@ -965,16 +1088,20 @@ impl MessageProcessor { instruction_index: usize, feature_set: Arc, bpf_compute_budget: BpfComputeBudget, + timings: &mut 
ExecuteDetailsTimings, + demote_sysvar_write_locks: bool, + account_db: Arc, + ancestors: &Ancestors, evm_executor: Option<&mut evm_state::Executor>, ) -> Result<(), InstructionError> { // Fixup the special instructions key if present // before the account pre-values are taken care of if feature_set.is_active(&instructions_sysvar_enabled::id()) { for (i, key) in message.account_keys.iter().enumerate() { - if solana_sdk::sysvar::instructions::check_id(key) { + if instructions::check_id(key) { let mut mut_account_ref = accounts[i].borrow_mut(); - solana_sdk::sysvar::instructions::store_current_index( - &mut mut_account_ref.data, + instructions::store_current_index( + mut_account_ref.data_as_mut_slice(), instruction_index as u16, ); break; @@ -988,6 +1115,7 @@ impl MessageProcessor { program_id, rent_collector.rent, pre_accounts, + executable_accounts, account_deps, &self.programs, log_collector, @@ -995,9 +1123,16 @@ impl MessageProcessor { executors, instruction_recorder, feature_set, + account_db, + ancestors, + ); + let keyed_accounts = Self::create_keyed_accounts( + message, + instruction, + executable_accounts, + accounts, + demote_sysvar_write_locks, ); - let keyed_accounts = - Self::create_keyed_accounts(message, instruction, executable_accounts, accounts); self.process_instruction( program_id, &keyed_accounts, @@ -1012,7 +1147,12 @@ impl MessageProcessor { executable_accounts, accounts, &rent_collector.rent, + timings, + demote_sysvar_write_locks, )?; + + timings.accumulate(&invoke_context.timings); + Ok(()) } @@ -1020,20 +1160,25 @@ impl MessageProcessor { /// This method calls each instruction in the message over the set of loaded Accounts /// The accounts are committed back to the bank only if every instruction succeeds #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] pub fn process_message( &self, message: &Message, - loaders: &[Vec<(Pubkey, RefCell)>], - accounts: &[Rc>], - account_deps: &[(Pubkey, RefCell)], + loaders: 
&[Vec<(Pubkey, Rc>)>], + accounts: &[Rc>], + account_deps: &[(Pubkey, Rc>)], rent_collector: &RentCollector, log_collector: Option>, executors: Rc>, instruction_recorders: Option<&[InstructionRecorder]>, feature_set: Arc, bpf_compute_budget: BpfComputeBudget, + timings: &mut ExecuteDetailsTimings, + account_db: Arc, + ancestors: &Ancestors, mut evm_executor: Option<&mut evm_state::Executor>, ) -> Result<(), TransactionError> { + let demote_sysvar_write_locks = feature_set.is_active(&demote_sysvar_write_locks::id()); for (instruction_index, instruction) in message.instructions.iter().enumerate() { let instruction_recorder = instruction_recorders .as_ref() @@ -1051,6 +1196,10 @@ impl MessageProcessor { instruction_index, feature_set.clone(), bpf_compute_budget, + timings, + demote_sysvar_write_locks, + account_db.clone(), + ancestors, evm_executor.as_deref_mut(), ) .map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?; @@ -1063,6 +1212,7 @@ impl MessageProcessor { mod tests { use super::*; use solana_sdk::{ + account::Account, instruction::{AccountMeta, Instruction, InstructionError}, message::Message, native_loader::create_loadable_account_for_test, @@ -1078,29 +1228,33 @@ mod tests { for i in 0..MAX_DEPTH { program_ids.push(solana_sdk::pubkey::new_rand()); keys.push(solana_sdk::pubkey::new_rand()); - accounts.push(Rc::new(RefCell::new(Account::new( + accounts.push(Rc::new(RefCell::new(AccountSharedData::new( i as u64, 1, &program_ids[i], )))); - pre_accounts.push(PreAccount::new(&keys[i], &accounts[i].borrow(), false)) + pre_accounts.push(PreAccount::new(&keys[i], &accounts[i].borrow())) } - let account = Account::new(1, 1, &solana_sdk::pubkey::Pubkey::default()); + let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::Pubkey::default()); for program_id in program_ids.iter() { - pre_accounts.push(PreAccount::new(program_id, &account.clone(), false)); + pre_accounts.push(PreAccount::new(program_id, &account.clone())); } + let 
ancestors = Ancestors::default(); let mut invoke_context = ThisInvokeContext::new( &program_ids[0], Rent::default(), pre_accounts, &[], &[], + &[], None, BpfComputeBudget::default(), Rc::new(RefCell::new(Executors::default())), None, Arc::new(FeatureSet::all_enabled()), + Arc::new(Accounts::default()), + &ancestors, ); // Check call depth increases and has a limit @@ -1122,14 +1276,19 @@ mod tests { AccountMeta::new(keys[owned_index], false), ]; let message = Message::new( - &[Instruction::new(program_ids[owned_index], &[0_u8], metas)], + &[Instruction::new_with_bytes( + program_ids[owned_index], + &[0], + metas, + )], None, ); // modify account owned by the program - accounts[owned_index].borrow_mut().data[0] = (MAX_DEPTH + owned_index) as u8; + accounts[owned_index].borrow_mut().data_as_mut_slice()[0] = + (MAX_DEPTH + owned_index) as u8; let mut these_accounts = accounts[not_owned_index..owned_index + 1].to_vec(); - these_accounts.push(Rc::new(RefCell::new(Account::new( + these_accounts.push(Rc::new(RefCell::new(AccountSharedData::new( 1, 1, &solana_sdk::pubkey::Pubkey::default(), @@ -1141,13 +1300,14 @@ mod tests { invoke_context.pre_accounts[owned_index] .account .borrow() - .data[0], + .data()[0], (MAX_DEPTH + owned_index) as u8 ); // modify account not owned by the program - let data = accounts[not_owned_index].borrow_mut().data[0]; - accounts[not_owned_index].borrow_mut().data[0] = (MAX_DEPTH + not_owned_index) as u8; + let data = accounts[not_owned_index].borrow_mut().data()[0]; + accounts[not_owned_index].borrow_mut().data_as_mut_slice()[0] = + (MAX_DEPTH + not_owned_index) as u8; assert_eq!( invoke_context.verify_and_update( &message, @@ -1161,10 +1321,10 @@ mod tests { invoke_context.pre_accounts[not_owned_index] .account .borrow() - .data[0], + .data()[0], data ); - accounts[not_owned_index].borrow_mut().data[0] = data; + accounts[not_owned_index].borrow_mut().data_as_mut_slice()[0] = data; invoke_context.pop(); } @@ -1196,7 +1356,7 @@ mod tests { fn 
test_verify_account_references() { let accounts = vec![( solana_sdk::pubkey::new_rand(), - RefCell::new(Account::default()), + Rc::new(RefCell::new(AccountSharedData::default())), )]; assert!(MessageProcessor::verify_account_references(&accounts).is_ok()); @@ -1213,7 +1373,7 @@ mod tests { is_writable: bool, rent: Rent, pre: PreAccount, - post: Account, + post: AccountSharedData, } impl Change { pub fn new(owner: &Pubkey, program_id: &Pubkey) -> Self { @@ -1223,19 +1383,18 @@ mod tests { is_writable: true, pre: PreAccount::new( &solana_sdk::pubkey::new_rand(), - &Account { + &AccountSharedData::from(Account { owner: *owner, lamports: std::u64::MAX, data: vec![], ..Account::default() - }, - false, + }), ), - post: Account { + post: AccountSharedData::from(Account { owner: *owner, lamports: std::u64::MAX, ..Account::default() - }, + }), } } pub fn read_only(mut self) -> Self { @@ -1257,8 +1416,8 @@ mod tests { self } pub fn data(mut self, pre: Vec, post: Vec) -> Self { - self.pre.account.borrow_mut().data = pre; - self.post.data = post; + self.pre.account.borrow_mut().set_data(pre); + self.post.set_data(post); self } pub fn rent_epoch(mut self, pre: u64, post: u64) -> Self { @@ -1269,9 +1428,10 @@ mod tests { pub fn verify(&self) -> Result<(), InstructionError> { self.pre.verify( &self.program_id, - Some(self.is_writable), + self.is_writable, &self.rent, &self.post, + &mut ExecuteDetailsTimings::default(), ) } } @@ -1619,7 +1779,7 @@ mod tests { } // Change data in a read-only account MockSystemInstruction::AttemptDataChange { data } => { - keyed_accounts[1].account.borrow_mut().data = vec![data]; + keyed_accounts[1].account.borrow_mut().set_data(vec![data]); Ok(()) } } @@ -1633,17 +1793,20 @@ mod tests { let mut message_processor = MessageProcessor::default(); message_processor.add_program(mock_system_program_id, mock_system_process_instruction); - let mut accounts: Vec>> = Vec::new(); - let account = Account::new_ref(100, 1, &mock_system_program_id); + let mut 
accounts: Vec>> = Vec::new(); + let account = AccountSharedData::new_ref(100, 1, &mock_system_program_id); accounts.push(account); - let account = Account::new_ref(0, 1, &mock_system_program_id); + let account = AccountSharedData::new_ref(0, 1, &mock_system_program_id); accounts.push(account); - let mut loaders: Vec)>> = Vec::new(); - let account = RefCell::new(create_loadable_account_for_test("mock_system_program")); + let mut loaders: Vec>)>> = Vec::new(); + let account = Rc::new(RefCell::new(create_loadable_account_for_test( + "mock_system_program", + ))); loaders.push(vec![(mock_system_program_id, account)]); let executors = Rc::new(RefCell::new(Executors::default())); + let ancestors = Ancestors::default(); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -1652,7 +1815,7 @@ mod tests { AccountMeta::new_readonly(to_pubkey, false), ]; let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( mock_system_program_id, &MockSystemInstruction::Correct, account_metas.clone(), @@ -1670,7 +1833,10 @@ mod tests { executors.clone(), None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!(result, Ok(())); @@ -1678,7 +1844,7 @@ mod tests { assert_eq!(accounts[1].borrow().lamports, 0); let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( mock_system_program_id, &MockSystemInstruction::AttemptCredit { lamports: 50 }, account_metas.clone(), @@ -1696,7 +1862,10 @@ mod tests { executors.clone(), None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!( @@ -1708,7 +1877,7 @@ mod tests { ); let message = Message::new( - 
&[Instruction::new( + &[Instruction::new_with_bincode( mock_system_program_id, &MockSystemInstruction::AttemptDataChange { data: 50 }, account_metas, @@ -1726,7 +1895,10 @@ mod tests { executors, None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!( @@ -1783,7 +1955,7 @@ mod tests { let mut dup_account = keyed_accounts[2].try_account_ref_mut()?; dup_account.lamports -= lamports; to_account.lamports += lamports; - dup_account.data = vec![data]; + dup_account.set_data(vec![data]); } keyed_accounts[0].try_account_ref_mut()?.lamports -= lamports; keyed_accounts[1].try_account_ref_mut()?.lamports += lamports; @@ -1800,17 +1972,20 @@ mod tests { let mut message_processor = MessageProcessor::default(); message_processor.add_program(mock_program_id, mock_system_process_instruction); - let mut accounts: Vec>> = Vec::new(); - let account = Account::new_ref(100, 1, &mock_program_id); + let mut accounts: Vec>> = Vec::new(); + let account = AccountSharedData::new_ref(100, 1, &mock_program_id); accounts.push(account); - let account = Account::new_ref(0, 1, &mock_program_id); + let account = AccountSharedData::new_ref(0, 1, &mock_program_id); accounts.push(account); - let mut loaders: Vec)>> = Vec::new(); - let account = RefCell::new(create_loadable_account_for_test("mock_system_program")); + let mut loaders: Vec>)>> = Vec::new(); + let account = Rc::new(RefCell::new(create_loadable_account_for_test( + "mock_system_program", + ))); loaders.push(vec![(mock_program_id, account)]); let executors = Rc::new(RefCell::new(Executors::default())); + let ancestors = Ancestors::default(); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -1823,7 +1998,7 @@ mod tests { // Try to borrow mut the same account let message = Message::new( - &[Instruction::new( + 
&[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::BorrowFail, account_metas.clone(), @@ -1840,7 +2015,10 @@ mod tests { executors.clone(), None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!( @@ -1853,7 +2031,7 @@ mod tests { // Try to borrow mut the same account in a safe way let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::MultiBorrowMut, account_metas.clone(), @@ -1870,14 +2048,17 @@ mod tests { executors.clone(), None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!(result, Ok(())); // Do work on the same account but at different location in keyed_accounts[] let message = Message::new( - &[Instruction::new( + &[Instruction::new_with_bincode( mock_program_id, &MockSystemInstruction::DoWork { lamports: 10, @@ -1887,6 +2068,7 @@ mod tests { )], Some(&from_pubkey), ); + let ancestors = Ancestors::default(); let result = message_processor.process_message( &message, &loaders, @@ -1897,13 +2079,16 @@ mod tests { executors, None, Arc::new(FeatureSet::all_enabled()), - BpfComputeBudget::new(&FeatureSet::all_enabled()), + BpfComputeBudget::new(), + &mut ExecuteDetailsTimings::default(), + Arc::new(Accounts::default()), + &ancestors, None, ); assert_eq!(result, Ok(())); assert_eq!(accounts[0].borrow().lamports, 80); assert_eq!(accounts[1].borrow().lamports, 20); - assert_eq!(accounts[0].borrow().data, vec![42]); + assert_eq!(accounts[0].borrow().data(), &vec![42]); } #[test] @@ -1933,10 +2118,10 @@ mod tests { MockInstruction::NoopSuccess => (), MockInstruction::NoopFail => return Err(InstructionError::GenericError), 
MockInstruction::ModifyOwned => { - keyed_accounts[0].try_account_ref_mut()?.data[0] = 1 + keyed_accounts[0].try_account_ref_mut()?.data_as_mut_slice()[0] = 1 } MockInstruction::ModifyNotOwned => { - keyed_accounts[1].try_account_ref_mut()?.data[0] = 1 + keyed_accounts[1].try_account_ref_mut()?.data_as_mut_slice()[0] = 1 } } } else { @@ -1948,18 +2133,21 @@ mod tests { let caller_program_id = solana_sdk::pubkey::new_rand(); let callee_program_id = solana_sdk::pubkey::new_rand(); - let mut program_account = Account::new(1, 0, &native_loader::id()); + let mut program_account = AccountSharedData::new(1, 0, &native_loader::id()); program_account.executable = true; - let executable_preaccount = PreAccount::new(&callee_program_id, &program_account, true); - let executable_accounts = vec![(callee_program_id, RefCell::new(program_account.clone()))]; + let executable_preaccount = PreAccount::new(&callee_program_id, &program_account); + let executable_accounts = vec![( + callee_program_id, + Rc::new(RefCell::new(program_account.clone())), + )]; let owned_key = solana_sdk::pubkey::new_rand(); - let owned_account = Account::new(42, 1, &callee_program_id); - let owned_preaccount = PreAccount::new(&owned_key, &owned_account, true); + let owned_account = AccountSharedData::new(42, 1, &callee_program_id); + let owned_preaccount = PreAccount::new(&owned_key, &owned_account); let not_owned_key = solana_sdk::pubkey::new_rand(); - let not_owned_account = Account::new(84, 1, &solana_sdk::pubkey::new_rand()); - let not_owned_preaccount = PreAccount::new(¬_owned_key, ¬_owned_account, true); + let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand()); + let not_owned_preaccount = PreAccount::new(¬_owned_key, ¬_owned_account); #[allow(unused_mut)] let mut accounts = vec![ @@ -1969,6 +2157,7 @@ mod tests { ]; let programs: Vec<(_, ProcessInstructionWithContext)> = vec![(callee_program_id, mock_process_instruction)]; + let ancestors = Ancestors::default(); let 
mut invoke_context = ThisInvokeContext::new( &caller_program_id, Rent::default(), @@ -1978,12 +2167,15 @@ mod tests { executable_preaccount, ], &[], + &[], programs.as_slice(), None, BpfComputeBudget::default(), Rc::new(RefCell::new(Executors::default())), None, Arc::new(FeatureSet::all_enabled()), + Arc::new(Accounts::default()), + &ancestors, ); let metas = vec![ AccountMeta::new(owned_key, false), @@ -1991,30 +2183,31 @@ mod tests { ]; // not owned account modified by the caller (before the invoke) - accounts[0].borrow_mut().data[0] = 1; - let instruction = Instruction::new( + accounts[0].borrow_mut().data_as_mut_slice()[0] = 1; + let instruction = Instruction::new_with_bincode( callee_program_id, &MockInstruction::NoopSuccess, metas.clone(), ); + let demote_sysvar_write_locks = true; let message = Message::new(&[instruction], None); - let caller_privileges = message + let caller_write_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i)) + .map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks)) .collect::>(); assert_eq!( MessageProcessor::process_cross_program_instruction( &message, &executable_accounts, &accounts, - &caller_privileges, + &caller_write_privileges, &mut invoke_context, ), Err(InstructionError::ExternalAccountDataModified) ); - accounts[0].borrow_mut().data[0] = 0; + accounts[0].borrow_mut().data_as_mut_slice()[0] = 0; let cases = vec![ (MockInstruction::NoopSuccess, Ok(())), @@ -2030,20 +2223,21 @@ mod tests { ]; for case in cases { - let instruction = Instruction::new(callee_program_id, &case.0, metas.clone()); + let instruction = + Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone()); let message = Message::new(&[instruction], None); - let caller_privileges = message + let caller_write_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i)) + .map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks)) .collect::>(); assert_eq!( 
MessageProcessor::process_cross_program_instruction( &message, &executable_accounts, &accounts, - &caller_privileges, + &caller_write_privileges, &mut invoke_context, ), case.1 diff --git a/runtime/src/native_loader.rs b/runtime/src/native_loader.rs index 72449a454d..b60b587708 100644 --- a/runtime/src/native_loader.rs +++ b/runtime/src/native_loader.rs @@ -6,6 +6,7 @@ use libloading::os::windows::*; use log::*; use num_derive::{FromPrimitive, ToPrimitive}; use solana_sdk::{ + account::ReadableAccount, decode_error::DecodeError, entrypoint_native::ProgramEntrypoint, instruction::InstructionError, @@ -150,8 +151,8 @@ impl NativeLoader { } let params = keyed_accounts_iter.as_slice(); - let name_vec = &program.try_account_ref()?.data; - let name = match str::from_utf8(name_vec) { + let account = program.try_account_ref()?; + let name = match str::from_utf8(account.data()) { Ok(v) => v, Err(e) => { error!("Invalid UTF-8 sequence: {}", e); diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index 3be5d07154..f14dfefc2f 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -1,7 +1,13 @@ //! 
calculate and collect rent from Accounts use solana_sdk::{ - account::Account, clock::Epoch, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig, - incinerator, pubkey::Pubkey, rent::Rent, sysvar, + account::{AccountSharedData, ReadableAccount}, + clock::Epoch, + epoch_schedule::EpochSchedule, + genesis_config::GenesisConfig, + incinerator, + pubkey::Pubkey, + rent::Rent, + sysvar, }; #[derive(Serialize, Deserialize, Clone, PartialEq, Debug, AbiExample)] @@ -50,12 +56,15 @@ impl RentCollector { // the account rent collected, if any // #[must_use = "add to Bank::collected_rent"] - pub fn collect_from_existing_account(&self, address: &Pubkey, account: &mut Account) -> u64 { - if account.executable + pub fn collect_from_existing_account( + &self, + address: &Pubkey, + account: &mut AccountSharedData, + ) -> u64 { + if account.executable // executable accounts must be rent-exempt balance || account.rent_epoch > self.epoch || sysvar::check_id(&account.owner) || *address == incinerator::id() - || *address == solana_sdk::evm_state::id() { 0 } else { @@ -72,14 +81,14 @@ impl RentCollector { let (rent_due, exempt) = self.rent - .due(account.lamports, account.data.len(), years_elapsed); + .due(account.lamports, account.data().len(), years_elapsed); if exempt || rent_due != 0 { if account.lamports > rent_due { account.rent_epoch = self.epoch + if exempt { // Rent isn't collected for the next epoch - // Make sure to check exempt status later in curent epoch again + // Make sure to check exempt status later in current epoch again 0 } else { // Rent is collected for next epoch @@ -89,7 +98,7 @@ impl RentCollector { rent_due } else { let rent_charged = account.lamports; - *account = Account::default(); + *account = AccountSharedData::default(); rent_charged } } else { @@ -100,7 +109,11 @@ impl RentCollector { } #[must_use = "add to Bank::collected_rent"] - pub fn collect_from_created_account(&self, address: &Pubkey, account: &mut Account) -> u64 { + pub fn 
collect_from_created_account( + &self, + address: &Pubkey, + account: &mut AccountSharedData, + ) -> u64 { // initialize rent_epoch as created at this epoch account.rent_epoch = self.epoch; self.collect_from_existing_account(address, account) @@ -110,6 +123,7 @@ impl RentCollector { #[cfg(test)] mod tests { use super::*; + use solana_sdk::account::Account; #[test] fn test_collect_from_account_created_and_existing() { @@ -118,11 +132,11 @@ mod tests { let new_epoch = 3; let (mut created_account, mut existing_account) = { - let account = Account { + let account = AccountSharedData::from(Account { lamports: old_lamports, rent_epoch: old_epoch, ..Account::default() - }; + }); (account.clone(), account) }; @@ -150,7 +164,7 @@ mod tests { #[test] fn test_rent_exempt_temporal_escape() { - let mut account = Account::default(); + let mut account = AccountSharedData::default(); let epoch = 3; let huge_lamports = 123_456_789_012; let tiny_lamports = 789_012; diff --git a/runtime/src/secondary_index.rs b/runtime/src/secondary_index.rs index eabe32f529..c6d45961b4 100644 --- a/runtime/src/secondary_index.rs +++ b/runtime/src/secondary_index.rs @@ -1,157 +1,121 @@ -use crate::contains::Contains; use dashmap::{mapref::entry::Entry::Occupied, DashMap}; -use log::*; -use solana_sdk::{clock::Slot, pubkey::Pubkey}; +use solana_sdk::pubkey::Pubkey; use std::{ - borrow::Borrow, - collections::{hash_map, HashMap, HashSet}, + collections::HashSet, fmt::Debug, - sync::{Arc, RwLock}, + sync::{ + atomic::{AtomicU64, Ordering}, + RwLock, + }, }; -pub type SecondaryReverseIndexEntry = RwLock>; +// The only cases where an inner key should map to a different outer key is +// if the key had different account data for the indexed key across different +// slots. As this is rare, it should be ok to use a Vec here over a HashSet, even +// though we are running some key existence checks. 
+pub type SecondaryReverseIndexEntry = RwLock>; pub trait SecondaryIndexEntry: Debug { - fn get_or_create(&self, key: &Pubkey, f: &dyn Fn(&RwLock>)); - fn get(&self, key: &Pubkey, f: &dyn Fn(Option<&RwLock>>) -> T) -> T; - fn remove_key_if_empty(&self, key: &Pubkey); + fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64); + // Removes a value from the set. Returns whether the value was present in the set. + fn remove_inner_key(&self, key: &Pubkey) -> bool; fn is_empty(&self) -> bool; fn keys(&self) -> Vec; fn len(&self) -> usize; } +#[derive(Debug, Default)] +pub struct SecondaryIndexStats { + last_report: AtomicU64, + num_inner_keys: AtomicU64, +} + #[derive(Debug, Default)] pub struct DashMapSecondaryIndexEntry { - pubkey_to_slot_set: DashMap>>, + account_keys: DashMap, } impl SecondaryIndexEntry for DashMapSecondaryIndexEntry { - fn get_or_create(&self, key: &Pubkey, f: &dyn Fn(&RwLock>)) { - let slot_set = self.pubkey_to_slot_set.get(key).unwrap_or_else(|| { - self.pubkey_to_slot_set - .entry(*key) - .or_insert(RwLock::new(HashSet::new())) - .downgrade() - }); - - f(&slot_set) + fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64) { + if self.account_keys.get(key).is_none() { + self.account_keys.entry(*key).or_insert_with(|| { + inner_keys_count.fetch_add(1, Ordering::Relaxed); + }); + } } - fn get(&self, key: &Pubkey, f: &dyn Fn(Option<&RwLock>>) -> T) -> T { - let slot_set = self.pubkey_to_slot_set.get(key); - - f(slot_set.as_ref().map(|entry_ref| entry_ref.value())) - } - - fn remove_key_if_empty(&self, key: &Pubkey) { - if let Occupied(key_entry) = self.pubkey_to_slot_set.entry(*key) { - // Delete the `key` if the slot set is empty - let slot_set = key_entry.get(); - - // Write lock on `key_entry` above through the `entry` - // means nobody else has access to this lock at this time, - // so this check for empty -> remove() is atomic - if slot_set.read().unwrap().is_empty() { - key_entry.remove(); - } - } + fn 
remove_inner_key(&self, key: &Pubkey) -> bool { + self.account_keys.remove(key).is_some() } fn is_empty(&self) -> bool { - self.pubkey_to_slot_set.is_empty() + self.account_keys.is_empty() } fn keys(&self) -> Vec { - self.pubkey_to_slot_set + self.account_keys .iter() .map(|entry_ref| *entry_ref.key()) .collect() } fn len(&self) -> usize { - self.pubkey_to_slot_set.len() + self.account_keys.len() } } #[derive(Debug, Default)] pub struct RwLockSecondaryIndexEntry { - pubkey_to_slot_set: RwLock>>>>, + account_keys: RwLock>, } impl SecondaryIndexEntry for RwLockSecondaryIndexEntry { - fn get_or_create(&self, key: &Pubkey, f: &dyn Fn(&RwLock>)) { - let slot_set = self.pubkey_to_slot_set.read().unwrap().get(key).cloned(); - - let slot_set = { - if let Some(slot_set) = slot_set { - slot_set - } else { - self.pubkey_to_slot_set - .write() - .unwrap() - .entry(*key) - .or_insert_with(|| Arc::new(RwLock::new(HashSet::new()))) - .clone() - } + fn insert_if_not_exists(&self, key: &Pubkey, inner_keys_count: &AtomicU64) { + let exists = self.account_keys.read().unwrap().contains(key); + if !exists { + let mut w_account_keys = self.account_keys.write().unwrap(); + w_account_keys.insert(*key); + inner_keys_count.fetch_add(1, Ordering::Relaxed); }; - - f(&slot_set) - } - - fn get(&self, key: &Pubkey, f: &dyn Fn(Option<&RwLock>>) -> T) -> T { - let slot_set = self.pubkey_to_slot_set.read().unwrap().get(key).cloned(); - f(slot_set.as_deref()) } - fn remove_key_if_empty(&self, key: &Pubkey) { - if let hash_map::Entry::Occupied(key_entry) = - self.pubkey_to_slot_set.write().unwrap().entry(*key) - { - // Delete the `key` if the slot set is empty - let slot_set = key_entry.get(); - - // Write lock on `key_entry` above through the `entry` - // means nobody else has access to this lock at this time, - // so this check for empty -> remove() is atomic - if slot_set.read().unwrap().is_empty() { - key_entry.remove(); - } - } + fn remove_inner_key(&self, key: &Pubkey) -> bool { + 
self.account_keys.write().unwrap().remove(key) } fn is_empty(&self) -> bool { - self.pubkey_to_slot_set.read().unwrap().is_empty() + self.account_keys.read().unwrap().is_empty() } fn keys(&self) -> Vec { - self.pubkey_to_slot_set - .read() - .unwrap() - .keys() - .cloned() - .collect() + self.account_keys.read().unwrap().iter().cloned().collect() } fn len(&self) -> usize { - self.pubkey_to_slot_set.read().unwrap().len() + self.account_keys.read().unwrap().len() } } #[derive(Debug, Default)] pub struct SecondaryIndex { + metrics_name: &'static str, // Map from index keys to index values pub index: DashMap, - // Map from index values back to index keys, used for cleanup. - // Alternative is to store Option in each AccountInfo in the - // AccountsIndex if something is an SPL account with a mint, but then - // every AccountInfo would have to allocate `Option` pub reverse_index: DashMap, + stats: SecondaryIndexStats, } impl SecondaryIndex { - pub fn insert(&self, key: &Pubkey, inner_key: &Pubkey, slot: Slot) { + pub fn new(metrics_name: &'static str) -> Self { + Self { + metrics_name, + ..Self::default() + } + } + + pub fn insert(&self, key: &Pubkey, inner_key: &Pubkey) { { let pubkeys_map = self.index.get(key).unwrap_or_else(|| { self.index @@ -160,92 +124,70 @@ impl .downgrade() }); - pubkeys_map.get_or_create(inner_key, &|slots_set: &RwLock>| { - let contains_key = slots_set.read().unwrap().contains(&slot); - if !contains_key { - slots_set.write().unwrap().insert(slot); - } - }); + pubkeys_map.insert_if_not_exists(inner_key, &self.stats.num_inner_keys); } - let prev_key = { - let slots_map = self.reverse_index.get(inner_key).unwrap_or_else(|| { - self.reverse_index - .entry(*inner_key) - .or_insert(RwLock::new(HashMap::new())) - .downgrade() - }); - let should_insert = { - // Most of the time, key should already exist and match - // the one in the update - if let Some(existing_key) = slots_map.read().unwrap().get(&slot) { - existing_key != key - } else { - // If 
there is no key yet, then insert - true - } - }; - if should_insert { - slots_map.write().unwrap().insert(slot, *key) - } else { - None - } - }; - - if let Some(prev_key) = prev_key { - // If the inner key was moved to a different primary key, remove - // the previous index entry. + let outer_keys = self.reverse_index.get(inner_key).unwrap_or_else(|| { + self.reverse_index + .entry(*inner_key) + .or_insert(RwLock::new(Vec::with_capacity(1))) + .downgrade() + }); - // Check is necessary because anoher thread's writes could feasibly be - // interleaved between `should_insert = { ... slots_map.get(...) ... }` and - // `prev_key = { ... slots_map.insert(...) ... }` - // Currently this isn't possible due to current AccountsIndex's (pubkey, slot)-per-thread - // exclusive-locking, but check is here for future-proofing a more relaxed implementation - if prev_key != *key { - self.remove_index_entries(&prev_key, inner_key, &[slot]); + let should_insert = !outer_keys.read().unwrap().contains(&key); + if should_insert { + let mut w_outer_keys = outer_keys.write().unwrap(); + if !w_outer_keys.contains(&key) { + w_outer_keys.push(*key); } } - } - pub fn remove_index_entries(&self, key: &Pubkey, inner_key: &Pubkey, slots: &[Slot]) { - let is_key_empty = if let Some(inner_key_map) = self.index.get(&key) { - // Delete the slot from the slot set - let is_inner_key_empty = - inner_key_map.get(&inner_key, &|slot_set: Option<&RwLock>>| { - if let Some(slot_set) = slot_set { - let mut w_slot_set = slot_set.write().unwrap(); - for slot in slots.iter() { - let is_present = w_slot_set.remove(slot); - if !is_present { - warn!("Reverse index is missing previous entry for key {}, inner_key: {}, slot: {}", - key, inner_key, slot); - } - } - w_slot_set.is_empty() - } else { - false - } - }); + let now = solana_sdk::timing::timestamp(); + let last = self.stats.last_report.load(Ordering::Relaxed); + let should_report = now.saturating_sub(last) > 1000 + && self.stats.last_report.compare_exchange( 
+ last, + now, + Ordering::Relaxed, + Ordering::Relaxed, + ) == Ok(last); + + if should_report { + datapoint_info!( + self.metrics_name, + ("num_secondary_keys", self.index.len() as i64, i64), + ( + "num_inner_keys", + self.stats.num_inner_keys.load(Ordering::Relaxed) as i64, + i64 + ), + ( + "num_reverse_index_keys", + self.reverse_index.len() as i64, + i64 + ), + ); + } + } - // Check if `key` is empty - if is_inner_key_empty { - // Write lock on `inner_key_entry` above through the `entry` - // means nobody else has access to this lock at this time, - // so this check for empty -> remove() is atomic - inner_key_map.remove_key_if_empty(inner_key); - inner_key_map.is_empty() - } else { - false - } - } else { - false + // Only safe to call from `remove_by_inner_key()` due to asserts + fn remove_index_entries(&self, outer_key: &Pubkey, removed_inner_key: &Pubkey) { + let is_outer_key_empty = { + let inner_key_map = self + .index + .get_mut(&outer_key) + .expect("If we're removing a key, then it must have an entry in the map"); + // If we deleted a pubkey from the reverse_index, then the corresponding entry + // better exist in this index as well or the two indexes are out of sync! + assert!(inner_key_map.value().remove_inner_key(&removed_inner_key)); + inner_key_map.is_empty() }; // Delete the `key` if the set of inner keys is empty - if is_key_empty { + if is_outer_key_empty { // Other threads may have interleaved writes to this `key`, // so double-check again for its emptiness - if let Occupied(key_entry) = self.index.entry(*key) { + if let Occupied(key_entry) = self.index.entry(*outer_key) { if key_entry.get().is_empty() { key_entry.remove(); } @@ -253,70 +195,31 @@ impl } } - // Specifying `slots_to_remove` == Some will only remove keys for those specific slots - // found for the `inner_key` in the reverse index. Otherwise, passing `None` - // will remove all keys that are found for the `inner_key` in the reverse index. 
- - // Note passing `None` is dangerous unless you're sure there's no other competing threads - // writing updates to the index for this Pubkey at the same time! - pub fn remove_by_inner_key<'a, C>(&'a self, inner_key: &Pubkey, slots_to_remove: Option<&'a C>) - where - C: Contains<'a, Slot>, - { + pub fn remove_by_inner_key(&self, inner_key: &Pubkey) { // Save off which keys in `self.index` had slots removed so we can remove them // after we purge the reverse index - let mut key_to_removed_slots: HashMap> = HashMap::new(); + let mut removed_outer_keys: HashSet = HashSet::new(); // Check if the entry for `inner_key` in the reverse index is empty // and can be removed - let needs_remove = { - if let Some(slots_to_remove) = slots_to_remove { - self.reverse_index - .get(inner_key) - .map(|slots_map| { - // Ideally we use a concurrent map here as well to prevent clean - // from blocking writes, but memory usage of DashMap is high - let mut w_slots_map = slots_map.value().write().unwrap(); - for slot in slots_to_remove.contains_iter() { - if let Some(removed_key) = w_slots_map.remove(slot.borrow()) { - key_to_removed_slots - .entry(removed_key) - .or_default() - .push(*slot.borrow()); - } - } - w_slots_map.is_empty() - }) - .unwrap_or(false) - } else { - if let Some((_, removed_slot_map)) = self.reverse_index.remove(inner_key) { - for (slot, removed_key) in removed_slot_map.into_inner().unwrap().into_iter() { - key_to_removed_slots - .entry(removed_key) - .or_default() - .push(slot); - } - } - // We just removed the key, no need to remove it again - false - } - }; - - if needs_remove { - // Other threads may have interleaved writes to this `inner_key`, between - // releasing the `self.reverse_index.get(inner_key)` lock and now, - // so double-check again for emptiness - if let Occupied(slot_map) = self.reverse_index.entry(*inner_key) { - if slot_map.get().read().unwrap().is_empty() { - slot_map.remove(); - } + if let Some((_, outer_keys_set)) = 
self.reverse_index.remove(inner_key) { + for removed_outer_key in outer_keys_set.into_inner().unwrap().into_iter() { + removed_outer_keys.insert(removed_outer_key); } } // Remove this value from those keys - for (key, slots) in key_to_removed_slots { - self.remove_index_entries(&key, inner_key, &slots); + for outer_key in &removed_outer_keys { + self.remove_index_entries(outer_key, inner_key); } + + // Safe to `fetch_sub()` here because a dead key cannot be removed more than once, + // and the `num_inner_keys` must have been incremented by exactly removed_outer_keys.len() + // in previous unique insertions of `inner_key` into `self.index` for each key + // in `removed_outer_keys` + self.stats + .num_inner_keys + .fetch_sub(removed_outer_keys.len() as u64, Ordering::Relaxed); } pub fn get(&self, key: &Pubkey) -> Vec { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 070ae4a3b9..d0d4a03106 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -2,11 +2,12 @@ use { crate::{ accounts::Accounts, accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, BankHashInfo}, - accounts_index::{AccountIndex, Ancestors}, + accounts_index::{AccountSecondaryIndexes, Ancestors}, append_vec::AppendVec, bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins}, blockhash_queue::{BlockHashEvm, BlockhashQueue}, epoch_stakes::EpochStakes, + hardened_unpack::UnpackedAppendVecMap, message_processor::MessageProcessor, rent_collector::RentCollector, serde_snapshot::future::SerializableStorage, @@ -14,9 +15,7 @@ use { }, bincode, bincode::{config::Options, Error}, - fs_extra::dir::CopyOptions, - log::{info, warn}, - rand::{thread_rng, Rng}, + log::*, serde::{de::DeserializeOwned, Deserialize, Serialize}, solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, @@ -31,7 +30,7 @@ use { }, std::{ collections::{HashMap, HashSet}, - io::{BufReader, BufWriter, Read, Write}, + io::{self, BufReader, BufWriter, Read, Write}, path::{Path, PathBuf}, 
result::Result, sync::{atomic::Ordering, Arc, RwLock}, @@ -49,7 +48,6 @@ mod tests; mod utils; use future::Context as TypeContextFuture; -use future_legacy::Context as TypeContextFutureLegacy; #[allow(unused_imports)] use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple}; @@ -59,9 +57,11 @@ pub(crate) use self::tests::reconstruct_accounts_db_via_serialization; pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages}; +// NOTE(velas): +// - old enum `SerdeStyle` was removed as single variant enum +// - this enum should be treated as new, EVM only related enum without any previous history #[derive(Copy, Clone, Eq, PartialEq)] pub(crate) enum EvmStateVersion { - V1_3_0, V1_4_0, } @@ -122,22 +122,21 @@ where } #[allow(clippy::too_many_arguments)] -pub(crate) fn bank_from_stream( +pub(crate) fn bank_from_stream( + evm_state_path: &Path, evm_state_version: EvmStateVersion, stream: &mut BufReader, - append_vecs_path: P, - evm_state_path: &Path, account_paths: &[PathBuf], + unpacked_append_vec_map: UnpackedAppendVecMap, genesis_config: &GenesisConfig, frozen_account_pubkeys: &[Pubkey], debug_keys: Option>>, additional_builtins: Option<&Builtins>, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, caching_enabled: bool, ) -> std::result::Result where R: Read, - P: AsRef, { macro_rules! 
INTO { ($x:ident) => {{ @@ -150,7 +149,7 @@ where frozen_account_pubkeys, evm_state_path, account_paths, - append_vecs_path, + unpacked_append_vec_map, debug_keys, additional_builtins, account_indexes, @@ -160,7 +159,6 @@ where }}; } match evm_state_version { - EvmStateVersion::V1_3_0 => INTO!(TypeContextFutureLegacy), EvmStateVersion::V1_4_0 => INTO!(TypeContextFuture), } .map_err(|err| { @@ -191,7 +189,6 @@ where }; } match evm_version { - EvmStateVersion::V1_3_0 => INTO!(TypeContextFutureLegacy), EvmStateVersion::V1_4_0 => INTO!(TypeContextFuture), } .map_err(|err| { @@ -235,27 +232,26 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDb<'a, C> { impl<'a, C> IgnoreAsHelper for SerializableAccountsDb<'a, C> {} #[allow(clippy::too_many_arguments)] -fn reconstruct_bank_from_fields( +fn reconstruct_bank_from_fields( bank_fields: BankFieldsToDeserialize, accounts_db_fields: AccountsDbFields, genesis_config: &GenesisConfig, frozen_account_pubkeys: &[Pubkey], evm_state_path: &Path, account_paths: &[PathBuf], - append_vecs_path: P, + unpacked_append_vec_map: UnpackedAppendVecMap, debug_keys: Option>>, additional_builtins: Option<&Builtins>, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, caching_enabled: bool, ) -> Result where E: SerializableStorage, - P: AsRef, { let mut accounts_db = reconstruct_accountsdb_from_fields( accounts_db_fields, account_paths, - append_vecs_path, + unpacked_append_vec_map, &genesis_config.cluster_type, account_indexes, caching_enabled, @@ -278,17 +274,16 @@ where Ok(bank) } -fn reconstruct_accountsdb_from_fields( +fn reconstruct_accountsdb_from_fields( accounts_db_fields: AccountsDbFields, account_paths: &[PathBuf], - stream_append_vecs_path: P, + unpacked_append_vec_map: UnpackedAppendVecMap, cluster_type: &ClusterType, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, caching_enabled: bool, ) -> Result where E: SerializableStorage, - P: AsRef, { let mut accounts_db = 
AccountsDb::new_with_config( account_paths.to_vec(), @@ -320,30 +315,17 @@ where let mut new_slot_storage = HashMap::new(); for storage_entry in slot_storage.drain(..) { - let path_index = thread_rng().gen_range(0, accounts_db.paths.len()); - let local_dir = &accounts_db.paths[path_index]; - - // Move the corresponding AppendVec from the snapshot into the directory pointed - // at by `local_dir` - let append_vec_relative_path = - AppendVec::new_relative_path(slot, storage_entry.id()); - let append_vec_abs_path = stream_append_vecs_path - .as_ref() - .join(&append_vec_relative_path); - let target = local_dir.join(append_vec_abs_path.file_name().unwrap()); - std::fs::rename(append_vec_abs_path.clone(), target).or_else(|_| { - let mut copy_options = CopyOptions::new(); - copy_options.overwrite = true; - fs_extra::move_items(&[&append_vec_abs_path], &local_dir, ©_options) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) - .and(Ok(())) - })?; + let file_name = AppendVec::file_name(slot, storage_entry.id()); - // Notify the AppendVec of the new file location - let local_path = local_dir.join(append_vec_relative_path); + let append_vec_path = unpacked_append_vec_map.get(&file_name).ok_or_else(|| { + io::Error::new( + io::ErrorKind::NotFound, + format!("{} not found in unpacked append vecs", file_name), + ) + })?; let (accounts, num_accounts) = - AppendVec::new_from_file(&local_path, storage_entry.current_len())?; + AppendVec::new_from_file(append_vec_path, storage_entry.current_len())?; let u_storage_entry = AccountStorageEntry::new_existing( slot, storage_entry.id(), diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index d85820c7e0..fd4cbb346b 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -5,17 +5,21 @@ use { accounts::{create_test_accounts, Accounts}, accounts_db::get_temp_accounts_paths, bank::{Bank, StatusCacheRc}, + hardened_unpack::UnpackedAppendVecMap, }, 
bincode::serialize_into, rand::{thread_rng, Rng}, solana_sdk::{ - account::Account, + account::AccountSharedData, clock::Slot, genesis_config::{create_genesis_config, ClusterType}, pubkey::Pubkey, signature::{Keypair, Signer}, }, - std::io::{BufReader, Cursor}, + std::{ + io::{BufReader, Cursor}, + path::Path, + }, tempfile::TempDir, }; @@ -23,19 +27,18 @@ use { fn copy_append_vecs>( accounts_db: &AccountsDb, output_dir: P, -) -> std::io::Result<()> { +) -> std::io::Result { let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value()); + let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); for storage in storage_entries.iter().flatten() { let storage_path = storage.get_path(); - let output_path = output_dir.as_ref().join(AppendVec::new_relative_path( - storage.slot(), - storage.append_vec_id(), - )); - - std::fs::copy(storage_path, output_path)?; + let file_name = AppendVec::file_name(storage.slot(), storage.append_vec_id()); + let output_path = output_dir.as_ref().join(&file_name); + std::fs::copy(&storage_path, &output_path)?; + unpacked_append_vec_map.insert(file_name, output_path); } - Ok(()) + Ok(unpacked_append_vec_map) } #[cfg(test)] @@ -45,7 +48,7 @@ fn check_accounts(accounts: &Accounts, pubkeys: &[Pubkey], num: usize) { let ancestors = vec![(0, 0)].into_iter().collect(); let account = accounts.load_slow(&ancestors, &pubkeys[idx]); let account1 = Some(( - Account::new((idx + 1) as u64, 0, &Account::default().owner), + AccountSharedData::new((idx + 1) as u64, 0, &AccountSharedData::default().owner), 0, )); assert_eq!(account, account1); @@ -53,41 +56,39 @@ fn check_accounts(accounts: &Accounts, pubkeys: &[Pubkey], num: usize) { } #[cfg(test)] -fn context_accountsdb_from_stream<'a, C, R, P>( +fn context_accountsdb_from_stream<'a, C, R>( stream: &mut BufReader, account_paths: &[PathBuf], - stream_append_vecs_path: P, + unpacked_append_vec_map: UnpackedAppendVecMap, ) -> Result where C: TypeContext<'a>, R: Read, - P: AsRef, { // read 
and deserialise the accounts database directly from the stream reconstruct_accountsdb_from_fields( C::deserialize_accounts_db_fields(stream)?, account_paths, - stream_append_vecs_path, + unpacked_append_vec_map, &ClusterType::Development, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ) } #[cfg(test)] -fn accountsdb_from_stream( +fn accountsdb_from_stream( stream: &mut BufReader, account_paths: &[PathBuf], - stream_append_vecs_path: P, + unpacked_append_vec_map: UnpackedAppendVecMap, ) -> Result where R: Read, - P: AsRef, { - context_accountsdb_from_stream::( + context_accountsdb_from_stream::( stream, account_paths, - stream_append_vecs_path, + unpacked_append_vec_map, ) } @@ -116,8 +117,12 @@ where fn test_accounts_serialize_style() { solana_logger::setup(); let (_accounts_dir, paths) = get_temp_accounts_paths(4).unwrap(); - let accounts = - Accounts::new_with_config(paths, &ClusterType::Development, HashSet::new(), false); + let accounts = Accounts::new_with_config( + paths, + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + ); let mut pubkeys: Vec = vec![]; create_test_accounts(&accounts, &mut pubkeys, 100, 0); @@ -136,13 +141,19 @@ fn test_accounts_serialize_style() { let copied_accounts = TempDir::new().unwrap(); // Simulate obtaining a copy of the AppendVecs from a tarball - copy_append_vecs(&accounts.accounts_db, copied_accounts.path()).unwrap(); + let unpacked_append_vec_map = + copy_append_vecs(&accounts.accounts_db, copied_accounts.path()).unwrap(); let buf = writer.into_inner(); let mut reader = BufReader::new(&buf[..]); let (_accounts_dir, daccounts_paths) = get_temp_accounts_paths(2).unwrap(); let daccounts = Accounts::new_empty( - accountsdb_from_stream(&mut reader, &daccounts_paths, copied_accounts.path()).unwrap(), + accountsdb_from_stream( + &mut reader, + &daccounts_paths, + unpacked_append_vec_map, + ) + .unwrap(), ); check_accounts(&daccounts, &pubkeys, 100); assert_eq!(accounts.bank_hash_at(0), 
daccounts.bank_hash_at(0)); @@ -195,18 +206,19 @@ fn test_bank_serialize_style(evm_version: EvmStateVersion) { ref_sc.status_cache.write().unwrap().add_root(2); // Create a directory to simulate AppendVecs unpackaged from a snapshot tar let copied_accounts = TempDir::new().unwrap(); - copy_append_vecs(&bank2.rc.accounts.accounts_db, copied_accounts.path()).unwrap(); + let unpacked_append_vec_map = + copy_append_vecs(&bank2.rc.accounts.accounts_db, copied_accounts.path()).unwrap(); let mut dbank = crate::serde_snapshot::bank_from_stream( + &evm_state_dir.path(), evm_version, &mut reader, - copied_accounts.path(), - &evm_state_dir.path(), &dbank_paths, + unpacked_append_vec_map, &genesis_config, &[], None, None, - HashSet::new(), + AccountSecondaryIndexes::default(), false, ) .unwrap(); @@ -229,9 +241,21 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( let buf = writer.into_inner(); let mut reader = BufReader::new(&buf[..]); let copied_accounts = TempDir::new().unwrap(); + // Simulate obtaining a copy of the AppendVecs from a tarball - copy_append_vecs(&accounts, copied_accounts.path()).unwrap(); - accountsdb_from_stream(&mut reader, &[], copied_accounts.path()).unwrap() + let unpacked_append_vec_map = copy_append_vecs(&accounts, copied_accounts.path()).unwrap(); + let mut accounts_db = + accountsdb_from_stream(&mut reader, &[], unpacked_append_vec_map).unwrap(); + + // The append vecs will be used from `copied_accounts` directly by the new AccountsDb so keep + // its TempDir alive + accounts_db + .temp_paths + .as_mut() + .unwrap() + .push(copied_accounts); + + accounts_db } #[test] @@ -244,11 +268,6 @@ fn test_bank_serialize_newer() { test_bank_serialize_style(EvmStateVersion::V1_4_0) } -#[test] -fn test_bank_serialize_older() { - test_bank_serialize_style(EvmStateVersion::V1_3_0) -} - #[cfg(all(test, RUSTC_WITH_SPECIALIZATION))] mod test_bank_serialize { use super::*; diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 
74ed16a69b..bb5e520e20 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,9 +1,9 @@ use crate::{ accounts_db::AccountsDb, - accounts_index::AccountIndex, + accounts_index::AccountSecondaryIndexes, bank::{Bank, BankSlotDelta, Builtins}, bank_forks::ArchiveFormat, - hardened_unpack::{unpack_snapshot, UnpackError}, + hardened_unpack::{unpack_snapshot, UnpackError, UnpackedAppendVecMap}, serde_snapshot::{ bank_from_stream, bank_to_stream, EvmStateVersion, SnapshotStorage, SnapshotStorages, }, @@ -41,14 +41,12 @@ pub const TAR_VERSION_FILE: &str = "version"; pub const MAX_SNAPSHOTS: usize = 8; // Save some snapshots but not too many const EVM_STATE_DIR: &str = "evm-state"; const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB -const VERSION_STRING_V1_3_0: &str = "1.3.0"; const VERSION_STRING_V1_4_0: &str = "1.4.0"; const DEFAULT_SNAPSHOT_VERSION: SnapshotVersion = SnapshotVersion::V1_4_0; const TMP_SNAPSHOT_PREFIX: &str = "tmp-snapshot-"; #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum SnapshotVersion { - V1_3_0, V1_4_0, } @@ -67,7 +65,6 @@ impl fmt::Display for SnapshotVersion { impl From for &'static str { fn from(snapshot_version: SnapshotVersion) -> &'static str { match snapshot_version { - SnapshotVersion::V1_3_0 => VERSION_STRING_V1_3_0, SnapshotVersion::V1_4_0 => VERSION_STRING_V1_4_0, } } @@ -87,7 +84,6 @@ impl FromStr for SnapshotVersion { version_string }; match version_string { - VERSION_STRING_V1_3_0 => Ok(SnapshotVersion::V1_3_0), VERSION_STRING_V1_4_0 => Ok(SnapshotVersion::V1_4_0), _ => Err("unsupported snapshot version"), } @@ -280,11 +276,10 @@ pub fn archive_snapshot_package(snapshot_package: &AccountsPackage) -> Result<() for storage in snapshot_package.storages.iter().flatten() { storage.flush()?; let storage_path = storage.get_path(); - let output_path = - staging_accounts_dir.join(crate::append_vec::AppendVec::new_relative_path( - storage.slot(), - storage.append_vec_id(), - )); + let 
output_path = staging_accounts_dir.join(crate::append_vec::AppendVec::file_name( + storage.slot(), + storage.append_vec_id(), + )); // `storage_path` - The file path where the AppendVec itself is located // `output_path` - The file path where the AppendVec will be placed in the staging directory. @@ -531,7 +526,6 @@ pub fn add_snapshot>( let mut bank_serialize = Measure::start("bank-serialize-ms"); let bank_snapshot_serializer = move |stream: &mut BufWriter| -> Result<()> { let evm_version = match snapshot_version { - SnapshotVersion::V1_3_0 => EvmStateVersion::V1_3_0, SnapshotVersion::V1_4_0 => EvmStateVersion::V1_4_0, }; bank_to_stream(evm_version, stream.by_ref(), bank, snapshot_storages)?; @@ -637,17 +631,22 @@ pub fn bank_from_archive>( genesis_config: &GenesisConfig, debug_keys: Option>>, additional_builtins: Option<&Builtins>, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, accounts_db_caching_enabled: bool, ) -> Result { // Untar the snapshot into a temporary directory let unpack_dir = tempfile::Builder::new() .prefix(TMP_SNAPSHOT_PREFIX) .tempdir_in(snapshot_path)?; - untar_snapshot_in(&snapshot_tar, &unpack_dir, archive_format)?; + + let unpacked_append_vec_map = untar_snapshot_in( + &snapshot_tar, + &unpack_dir.as_ref(), + account_paths, + archive_format, + )?; let mut measure = Measure::start("bank rebuild from snapshot"); - let unpacked_accounts_dir = unpack_dir.as_ref().join(TAR_ACCOUNTS_DIR); let unpacked_snapshots_dir = unpack_dir.as_ref().join(TAR_SNAPSHOTS_DIR); let unpacked_version_file = unpack_dir.as_ref().join(TAR_VERSION_FILE); @@ -657,10 +656,10 @@ pub fn bank_from_archive>( let bank = rebuild_bank_from_snapshots( snapshot_version.trim(), evm_state_path, - account_paths, frozen_account_pubkeys, &unpacked_snapshots_dir, - unpacked_accounts_dir, + account_paths, + unpacked_append_vec_map, genesis_config, debug_keys, additional_builtins, @@ -768,57 +767,55 @@ pub fn purge_old_snapshot_archives>(snapshot_output_dir: P) { 
} } -pub fn untar_snapshot_in, Q: AsRef>( +fn untar_snapshot_in>( snapshot_tar: P, - unpack_dir: Q, + unpack_dir: &Path, + account_paths: &[PathBuf], archive_format: ArchiveFormat, -) -> Result<()> { +) -> Result { let mut measure = Measure::start("snapshot untar"); let tar_name = File::open(&snapshot_tar)?; - match archive_format { + let account_paths_map = match archive_format { ArchiveFormat::TarBzip2 => { let tar = BzDecoder::new(BufReader::new(tar_name)); let mut archive = Archive::new(tar); - unpack_snapshot(&mut archive, unpack_dir)?; + unpack_snapshot(&mut archive, unpack_dir, account_paths)? } ArchiveFormat::TarGzip => { let tar = GzDecoder::new(BufReader::new(tar_name)); let mut archive = Archive::new(tar); - unpack_snapshot(&mut archive, unpack_dir)?; + unpack_snapshot(&mut archive, unpack_dir, account_paths)? } ArchiveFormat::TarZstd => { let tar = zstd::stream::read::Decoder::new(BufReader::new(tar_name))?; let mut archive = Archive::new(tar); - unpack_snapshot(&mut archive, unpack_dir)?; + unpack_snapshot(&mut archive, unpack_dir, account_paths)? } ArchiveFormat::Tar => { let tar = BufReader::new(tar_name); let mut archive = Archive::new(tar); - unpack_snapshot(&mut archive, unpack_dir)?; + unpack_snapshot(&mut archive, unpack_dir, account_paths)? } }; measure.stop(); info!("{}", measure); - Ok(()) + Ok(account_paths_map) } #[allow(clippy::too_many_arguments)] -fn rebuild_bank_from_snapshots

( +fn rebuild_bank_from_snapshots( snapshot_version: &str, evm_state_path: &Path, - account_paths: &[PathBuf], frozen_account_pubkeys: &[Pubkey], unpacked_snapshots_dir: &Path, - append_vecs_path: P, + account_paths: &[PathBuf], + unpacked_append_vec_map: UnpackedAppendVecMap, genesis_config: &GenesisConfig, debug_keys: Option>>, additional_builtins: Option<&Builtins>, - account_indexes: HashSet, + account_indexes: AccountSecondaryIndexes, accounts_db_caching_enabled: bool, -) -> Result -where - P: AsRef, -{ +) -> Result { info!("snapshot version: {}", snapshot_version); let snapshot_version_enum = @@ -835,10 +832,8 @@ where let root_paths = snapshot_paths .pop() .ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?; - info!( - "restoring database from storage backup: {:?}", - root_paths.evm_state_backup_path - ); + + // EVM State load let mut measure = Measure::start("evm state database restore"); if evm_state_path.exists() { warn!( @@ -858,25 +853,12 @@ where ); let bank = deserialize_snapshot_data_file(&root_paths.snapshot_file_path, |mut stream| { Ok(match snapshot_version_enum { - SnapshotVersion::V1_3_0 => bank_from_stream( - EvmStateVersion::V1_3_0, - &mut stream, - &append_vecs_path, - &evm_state_path, - account_paths, - genesis_config, - frozen_account_pubkeys, - debug_keys, - additional_builtins, - account_indexes, - accounts_db_caching_enabled, - ), SnapshotVersion::V1_4_0 => bank_from_stream( + &evm_state_path, EvmStateVersion::V1_4_0, &mut stream, - &append_vecs_path, - &evm_state_path, account_paths, + unpacked_append_vec_map, genesis_config, frozen_account_pubkeys, debug_keys, @@ -932,7 +914,13 @@ pub fn verify_snapshot_archive( { let temp_dir = tempfile::TempDir::new().unwrap(); let unpack_dir = temp_dir.path(); - untar_snapshot_in(snapshot_archive, &unpack_dir, archive_format).unwrap(); + untar_snapshot_in( + snapshot_archive, + &unpack_dir, + &[unpack_dir.to_path_buf()], + archive_format, + ) + .unwrap(); // Check snapshots 
are the same let unpacked_snapshots = unpack_dir.join(&TAR_SNAPSHOTS_DIR); diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 675a0b3189..fcb39a5b0c 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -2,7 +2,10 @@ //! node stakes use crate::vote_account::{ArcVoteAccount, VoteAccounts}; use solana_sdk::{ - account::Account, clock::Epoch, pubkey::Pubkey, sysvar::stake_history::StakeHistory, + account::{AccountSharedData, ReadableAccount}, + clock::Epoch, + pubkey::Pubkey, + sysvar::stake_history::StakeHistory, }; use solana_stake_program::stake_state::{new_stake_history_entry, Delegation, StakeState}; use solana_vote_program::vote_state::VoteState; @@ -106,16 +109,16 @@ impl Stakes { .sum::() } - pub fn is_stake(account: &Account) -> bool { + pub fn is_stake(account: &AccountSharedData) -> bool { solana_vote_program::check_id(&account.owner) || solana_stake_program::check_id(&account.owner) - && account.data.len() >= std::mem::size_of::() + && account.data().len() >= std::mem::size_of::() } pub fn store( &mut self, pubkey: &Pubkey, - account: &Account, + account: &AccountSharedData, fix_stake_deactivate: bool, check_vote_init: bool, ) -> Option { @@ -127,7 +130,7 @@ impl Stakes { // when account is removed (lamports == 0 or data uninitialized), don't read so that // given `pubkey` can be used for any owner in the future, while not affecting Stakes. 
if account.lamports != 0 - && !(check_vote_init && VoteState::is_uninitialized_no_deser(&account.data)) + && !(check_vote_init && VoteState::is_uninitialized_no_deser(&account.data())) { let stake = old.as_ref().map_or_else( || { @@ -232,7 +235,9 @@ pub mod tests { use solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}; // set up some dummies for a staked node (( vote ) ( stake )) - pub fn create_staked_node_accounts(stake: u64) -> ((Pubkey, Account), (Pubkey, Account)) { + pub fn create_staked_node_accounts( + stake: u64, + ) -> ((Pubkey, AccountSharedData), (Pubkey, AccountSharedData)) { let vote_pubkey = solana_sdk::pubkey::new_rand(); let vote_account = vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1); @@ -243,7 +248,7 @@ pub mod tests { } // add stake to a vote_pubkey ( stake ) - pub fn create_stake_account(stake: u64, vote_pubkey: &Pubkey) -> (Pubkey, Account) { + pub fn create_stake_account(stake: u64, vote_pubkey: &Pubkey) -> (Pubkey, AccountSharedData) { let stake_pubkey = solana_sdk::pubkey::new_rand(); ( stake_pubkey, @@ -260,7 +265,7 @@ pub mod tests { pub fn create_warming_staked_node_accounts( stake: u64, epoch: Epoch, - ) -> ((Pubkey, Account), (Pubkey, Account)) { + ) -> ((Pubkey, AccountSharedData), (Pubkey, AccountSharedData)) { let vote_pubkey = solana_sdk::pubkey::new_rand(); let vote_account = vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1); @@ -275,7 +280,7 @@ pub mod tests { stake: u64, epoch: Epoch, vote_pubkey: &Pubkey, - ) -> (Pubkey, Account) { + ) -> (Pubkey, AccountSharedData) { let stake_pubkey = solana_sdk::pubkey::new_rand(); ( stake_pubkey, @@ -407,7 +412,7 @@ pub mod tests { } // Vote account too big - let cache_data = vote_account.data.clone(); + let cache_data = vote_account.data().to_vec(); vote_account.data.push(0); stakes.store(&vote_pubkey, &vote_account, true, true); @@ -427,7 +432,7 @@ pub mod tests { 
assert!(vote_accounts.get(&vote_pubkey).is_none()); } - vote_account.data = cache_data; + vote_account.set_data(cache_data); stakes.store(&vote_pubkey, &vote_account, true, true); { @@ -557,7 +562,7 @@ pub mod tests { // not a stake account, and whacks above entry stakes.store( &stake_pubkey, - &Account::new(1, 0, &solana_stake_program::id()), + &AccountSharedData::new(1, 0, &solana_stake_program::id()), true, true, ); diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 104943ad8e..f362c01ef0 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1,8 +1,8 @@ use log::*; use solana_sdk::{ - account::Account, + account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, - ic_msg, + feature_set, ic_msg, instruction::InstructionError, keyed_account::{from_keyed_account, get_signers, next_keyed_account, KeyedAccount}, nonce, @@ -62,7 +62,7 @@ impl Address { } fn allocate( - account: &mut Account, + account: &mut AccountSharedData, address: &Address, space: u64, signers: &HashSet, @@ -79,7 +79,7 @@ fn allocate( // if it looks like the `to` account is already in use, bail // (note that the id check is also enforced by message_processor) - if !account.data.is_empty() || !system_program::check_id(&account.owner) { + if !account.data().is_empty() || !system_program::check_id(&account.owner) { ic_msg!( invoke_context, "Allocate: account {:?} already in use", @@ -98,13 +98,13 @@ fn allocate( return Err(SystemError::InvalidAccountDataLength.into()); } - account.data = vec![0; space as usize]; + account.set_data(vec![0; space as usize]); Ok(()) } fn assign( - account: &mut Account, + account: &mut AccountSharedData, address: &Address, owner: &Pubkey, signers: &HashSet, @@ -131,7 +131,7 @@ fn assign( } fn allocate_and_assign( - to: &mut Account, + to: &mut AccountSharedData, to_address: &Address, space: u64, owner: &Pubkey, @@ -200,14 +200,16 @@ fn 
transfer( lamports: u64, invoke_context: &mut dyn InvokeContext, ) -> Result<(), InstructionError> { - if lamports == 0 { + if !invoke_context.is_feature_active(&feature_set::system_transfer_zero_check::id()) + && lamports == 0 + { return Ok(()); } if from.signer_key().is_none() { ic_msg!( invoke_context, - "Transfer: `from` accont {} must sign", + "Transfer: `from` account {} must sign", from.unsigned_key() ); return Err(InstructionError::MissingRequiredSignature); @@ -225,7 +227,9 @@ fn transfer_with_seed( lamports: u64, invoke_context: &mut dyn InvokeContext, ) -> Result<(), InstructionError> { - if lamports == 0 { + if !invoke_context.is_feature_active(&feature_set::system_transfer_zero_check::id()) + && lamports == 0 + { return Ok(()); } @@ -421,11 +425,11 @@ pub enum SystemAccountKind { Nonce, } -pub fn get_system_account_kind(account: &Account) -> Option { +pub fn get_system_account_kind(account: &AccountSharedData) -> Option { if system_program::check_id(&account.owner) { - if account.data.is_empty() { + if account.data().is_empty() { Some(SystemAccountKind::System) - } else if account.data.len() == nonce::State::size() { + } else if account.data().len() == nonce::State::size() { match account.state().ok()? 
{ nonce::state::Versions::Current(state) => match *state { nonce::State::Initialized(_) => Some(SystemAccountKind::Nonce), @@ -446,7 +450,7 @@ mod tests { use crate::{bank::Bank, bank_client::BankClient}; use bincode::serialize; use solana_sdk::{ - account::{self, Account}, + account::{self, Account, AccountSharedData}, client::SyncClient, fee_calculator::FeeCalculator, genesis_config::create_genesis_config, @@ -486,10 +490,10 @@ mod tests { ) } - fn create_default_account() -> RefCell { - RefCell::new(Account::default()) + fn create_default_account() -> RefCell { + RefCell::new(AccountSharedData::default()) } - fn create_default_recent_blockhashes_account() -> RefCell { + fn create_default_recent_blockhashes_account() -> RefCell { RefCell::new( recent_blockhashes_account::create_account_with_data_for_test( vec![ @@ -500,8 +504,8 @@ mod tests { ), ) } - fn create_default_rent_account() -> RefCell { - RefCell::new(account::create_account_for_test(&Rent::free())) + fn create_default_rent_account() -> RefCell { + RefCell::new(account::create_account_shared_data_for_test(&Rent::free())) } #[test] @@ -509,8 +513,8 @@ mod tests { let new_owner = Pubkey::new(&[9; 32]); let from = solana_sdk::pubkey::new_rand(); let to = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); assert_eq!( process_instruction( @@ -531,7 +535,7 @@ mod tests { assert_eq!(from_account.borrow().lamports, 50); assert_eq!(to_account.borrow().lamports, 50); assert_eq!(to_account.borrow().owner, new_owner); - assert_eq!(to_account.borrow().data, [0, 0]); + assert_eq!(to_account.borrow().data(), &[0, 0]); } #[test] @@ -541,8 +545,8 @@ mod tests { let seed = "shiny pepper"; let to = Pubkey::create_with_seed(&from, seed, &new_owner).unwrap(); - 
let from_account = Account::new_ref(100, 0, &system_program::id()); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); assert_eq!( process_instruction( @@ -565,7 +569,7 @@ mod tests { assert_eq!(from_account.borrow().lamports, 50); assert_eq!(to_account.borrow().lamports, 50); assert_eq!(to_account.borrow().owner, new_owner); - assert_eq!(to_account.borrow().data, [0, 0]); + assert_eq!(to_account.borrow().data(), &[0, 0]); } #[test] @@ -576,9 +580,9 @@ mod tests { let seed = "shiny pepper"; let to = Pubkey::create_with_seed(&base, seed, &new_owner).unwrap(); - let from_account = Account::new_ref(100, 0, &system_program::id()); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); - let base_account = Account::new_ref(0, 0, &Pubkey::default()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); + let base_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); assert_eq!( process_instruction( @@ -602,7 +606,7 @@ mod tests { assert_eq!(from_account.borrow().lamports, 50); assert_eq!(to_account.borrow().lamports, 50); assert_eq!(to_account.borrow().owner, new_owner); - assert_eq!(to_account.borrow().data, [0, 0]); + assert_eq!(to_account.borrow().data(), &[0, 0]); } #[test] @@ -629,8 +633,8 @@ mod tests { let seed = "dull boy"; let to = Pubkey::create_with_seed(&from, seed, &new_owner).unwrap(); - let from_account = Account::new_ref(100, 0, &system_program::id()); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); let to_address = Address::create( &to, Some((&from, seed, &new_owner)), @@ -652,28 +656,28 @@ mod tests { 
Err(InstructionError::MissingRequiredSignature) ); assert_eq!(from_account.borrow().lamports, 100); - assert_eq!(*to_account.borrow(), Account::default()); + assert_eq!(*to_account.borrow(), AccountSharedData::default()); } #[test] fn test_create_with_zero_lamports() { - // create account with zero lamports tranferred + // create account with zero lamports transferred let new_owner = Pubkey::new(&[9; 32]); - let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 1, &solana_sdk::pubkey::new_rand()); // not from system account + let from = Pubkey::new_unique(); + let from_account = AccountSharedData::new_ref(100, 0, &Pubkey::new_unique()); // not from system account - let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); + let to = Pubkey::new_unique(); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); assert_eq!( create_account( - &KeyedAccount::new(&from, false, &from_account), // no signer - &KeyedAccount::new(&to, false, &to_account), + &KeyedAccount::new(&from, true, &from_account), + &KeyedAccount::new(&to, true, &to_account), &to.into(), 0, 2, &new_owner, - &[to].iter().cloned().collect::>(), + &[from, to].iter().cloned().collect::>(), &mut MockInvokeContext::default(), ), Ok(()) @@ -682,11 +686,10 @@ mod tests { let from_lamports = from_account.borrow().lamports; let to_lamports = to_account.borrow().lamports; let to_owner = to_account.borrow().owner; - let to_data = &to_account.borrow().data; assert_eq!(from_lamports, 100); assert_eq!(to_lamports, 0); assert_eq!(to_owner, new_owner); - assert_eq!(*to_data, [0, 0]); + assert_eq!(to_account.borrow().data(), &[0, 0]); } #[test] @@ -694,10 +697,10 @@ mod tests { // Attempt to create account with more lamports than remaining in from_account let new_owner = Pubkey::new(&[9; 32]); let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = 
AccountSharedData::new_ref(100, 0, &system_program::id()); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(0, 0, &Pubkey::default()); + let to_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); let result = create_account( &KeyedAccount::new(&from, true, &from_account), @@ -714,9 +717,9 @@ mod tests { #[test] fn test_request_more_than_allowed_data_length() { - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); let from = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(0, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &system_program::id()); let to = solana_sdk::pubkey::new_rand(); let signers = &[from, to].iter().cloned().collect::>(); @@ -753,7 +756,7 @@ mod tests { assert!(result.is_ok()); assert_eq!(to_account.borrow().lamports, 50); assert_eq!( - to_account.borrow().data.len() as u64, + to_account.borrow().data().len() as u64, MAX_PERMITTED_DATA_LENGTH ); } @@ -763,11 +766,11 @@ mod tests { // Attempt to create system account in account already owned by another program let new_owner = Pubkey::new(&[9; 32]); let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); let original_program_owner = Pubkey::new(&[5; 32]); let owned_key = solana_sdk::pubkey::new_rand(); - let owned_account = Account::new_ref(0, 0, &original_program_owner); + let owned_account = AccountSharedData::new_ref(0, 0, &original_program_owner); let unchanged_account = owned_account.clone(); let signers = &[from, owned_key].iter().cloned().collect::>(); @@ -790,7 +793,7 @@ mod tests { assert_eq!(owned_account, unchanged_account); // Attempt to create system account in account that already has data - let owned_account = Account::new_ref(0, 1, &Pubkey::default()); + let 
owned_account = AccountSharedData::new_ref(0, 1, &Pubkey::default()); let unchanged_account = owned_account.borrow().clone(); let result = create_account( &KeyedAccount::new(&from, true, &from_account), @@ -808,7 +811,7 @@ mod tests { assert_eq!(*owned_account.borrow(), unchanged_account); // Attempt to create an account that already has lamports - let owned_account = Account::new_ref(1, 0, &Pubkey::default()); + let owned_account = AccountSharedData::new_ref(1, 0, &Pubkey::default()); let unchanged_account = owned_account.borrow().clone(); let result = create_account( &KeyedAccount::new(&from, true, &from_account), @@ -830,10 +833,10 @@ mod tests { // Attempt to create an account without signing the transfer let new_owner = Pubkey::new(&[9; 32]); let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); let owned_key = solana_sdk::pubkey::new_rand(); - let owned_account = Account::new_ref(0, 0, &Pubkey::default()); + let owned_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); let owned_address = owned_key.into(); @@ -851,7 +854,7 @@ mod tests { assert_eq!(result, Err(InstructionError::MissingRequiredSignature)); // Haven't signed to account - let owned_account = Account::new_ref(0, 0, &Pubkey::default()); + let owned_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); let result = create_account( &KeyedAccount::new(&from, true, &from_account), &KeyedAccount::new(&owned_key, true, &owned_account), @@ -864,11 +867,11 @@ mod tests { ); assert_eq!(result, Err(InstructionError::MissingRequiredSignature)); - // support creation/assignment with zero lamports (ephemeral account) - let owned_account = Account::new_ref(0, 0, &Pubkey::default()); + // Don't support unsigned creation with zero lamports (ephemeral account) + let owned_account = AccountSharedData::new_ref(0, 0, &Pubkey::default()); let result = 
create_account( &KeyedAccount::new(&from, false, &from_account), - &KeyedAccount::new(&owned_key, false, &owned_account), + &KeyedAccount::new(&owned_key, true, &owned_account), &owned_address, 0, 2, @@ -876,17 +879,17 @@ mod tests { &[owned_key].iter().cloned().collect::>(), &mut MockInvokeContext::default(), ); - assert_eq!(result, Ok(())); + assert_eq!(result, Err(InstructionError::MissingRequiredSignature)); } #[test] fn test_create_sysvar_invalid_id() { // Attempt to create system account in account already owned by another program let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); let to = solana_sdk::pubkey::new_rand(); - let to_account = Account::new_ref(0, 0, &system_program::id()); + let to_account = AccountSharedData::new_ref(0, 0, &system_program::id()); let signers = [from, to].iter().cloned().collect::>(); let to_address = to.into(); @@ -911,13 +914,13 @@ mod tests { // Attempt to create system account in account with populated data let new_owner = Pubkey::new(&[9; 32]); let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); let populated_key = solana_sdk::pubkey::new_rand(); - let populated_account = Account { + let populated_account = AccountSharedData::from(Account { data: vec![0, 1, 2, 3], ..Account::default() - } + }) .into(); let signers = [from, populated_key] @@ -942,7 +945,7 @@ mod tests { #[test] fn test_create_from_account_is_nonce_fail() { let nonce = solana_sdk::pubkey::new_rand(); - let nonce_account = Account::new_ref_data( + let nonce_account = AccountSharedData::new_ref_data( 42, &nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), @@ -953,7 +956,7 @@ mod tests { let from = KeyedAccount::new(&nonce, true, 
&nonce_account); let new = solana_sdk::pubkey::new_rand(); - let new_account = Account::new_ref(0, 0, &system_program::id()); + let new_account = AccountSharedData::new_ref(0, 0, &system_program::id()); let signers = [nonce, new].iter().cloned().collect::>(); let new_address = new.into(); @@ -979,7 +982,7 @@ mod tests { let new_owner = Pubkey::new(&[9; 32]); let pubkey = solana_sdk::pubkey::new_rand(); - let mut account = Account::new(100, 0, &system_program::id()); + let mut account = AccountSharedData::new(100, 0, &system_program::id()); assert_eq!( assign( @@ -1019,7 +1022,7 @@ mod tests { let new_owner = sysvar::id(); let from = solana_sdk::pubkey::new_rand(); - let mut from_account = Account::new(100, 0, &system_program::id()); + let mut from_account = AccountSharedData::new(100, 0, &system_program::id()); assert_eq!( assign( @@ -1044,7 +1047,7 @@ mod tests { assert_eq!(result, Err(InstructionError::NotEnoughAccountKeys)); let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &system_program::id()); + let from_account = AccountSharedData::new_ref(100, 0, &system_program::id()); // Attempt to transfer with no destination let instruction = SystemInstruction::Transfer { lamports: 0 }; let data = serialize(&instruction).unwrap(); @@ -1059,9 +1062,9 @@ mod tests { #[test] fn test_transfer_lamports() { let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter + let from_account = AccountSharedData::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter let to = Pubkey::new(&[3; 32]); - let to_account = Account::new_ref(1, 0, &to); // account owner should not matter + let to_account = AccountSharedData::new_ref(1, 0, &to); // account owner should not matter let from_keyed_account = KeyedAccount::new(&from, true, &from_account); let to_keyed_account = KeyedAccount::new(&to, false, &to_account); transfer( @@ -1088,9 +1091,7 
@@ mod tests { assert_eq!(from_keyed_account.account.borrow().lamports, 50); assert_eq!(to_keyed_account.account.borrow().lamports, 51); - // test unsigned transfer of zero - let from_keyed_account = KeyedAccount::new(&from, false, &from_account); - + // test signed transfer of zero assert!(transfer( &from_keyed_account, &to_keyed_account, @@ -1100,19 +1101,34 @@ mod tests { .is_ok(),); assert_eq!(from_keyed_account.account.borrow().lamports, 50); assert_eq!(to_keyed_account.account.borrow().lamports, 51); + + // test unsigned transfer of zero + let from_keyed_account = KeyedAccount::new(&from, false, &from_account); + + assert_eq!( + transfer( + &from_keyed_account, + &to_keyed_account, + 0, + &mut MockInvokeContext::default(), + ), + Err(InstructionError::MissingRequiredSignature) + ); + assert_eq!(from_keyed_account.account.borrow().lamports, 50); + assert_eq!(to_keyed_account.account.borrow().lamports, 51); } #[test] fn test_transfer_with_seed() { let base = solana_sdk::pubkey::new_rand(); - let base_account = Account::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter + let base_account = AccountSharedData::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter let from_base_keyed_account = KeyedAccount::new(&base, true, &base_account); let from_seed = "42"; let from_owner = system_program::id(); let from = Pubkey::create_with_seed(&base, from_seed, &from_owner).unwrap(); - let from_account = Account::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter + let from_account = AccountSharedData::new_ref(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter let to = Pubkey::new(&[3; 32]); - let to_account = Account::new_ref(1, 0, &to); // account owner should not matter + let to_account = AccountSharedData::new_ref(1, 0, &to); // account owner should not matter let from_keyed_account = KeyedAccount::new(&from, true, &from_account); let to_keyed_account = KeyedAccount::new(&to, 
false, &to_account); transfer_with_seed( @@ -1164,7 +1180,7 @@ mod tests { #[test] fn test_transfer_lamports_from_nonce_account_fail() { let from = solana_sdk::pubkey::new_rand(); - let from_account = Account::new_ref_data( + let from_account = AccountSharedData::new_ref_data( 100, &nonce::state::Versions::new_current(nonce::State::Initialized(nonce::state::Data { authority: from, @@ -1179,7 +1195,7 @@ mod tests { ); let to = Pubkey::new(&[3; 32]); - let to_account = Account::new_ref(1, 0, &to); // account owner should not matter + let to_account = AccountSharedData::new_ref(1, 0, &to); // account owner should not matter assert_eq!( transfer( &KeyedAccount::new(&from, true, &from_account), @@ -1354,7 +1370,7 @@ mod tests { AccountMeta::new(alice_pubkey, false), AccountMeta::new(mallory_pubkey, true), ]; - let malicious_instruction = Instruction::new( + let malicious_instruction = Instruction::new_with_bincode( system_program::id(), &SystemInstruction::Transfer { lamports: 10 }, account_metas, @@ -1378,9 +1394,9 @@ mod tests { RefCell::new(if sysvar::recent_blockhashes::check_id(&meta.pubkey) { create_default_recent_blockhashes_account().into_inner() } else if sysvar::rent::check_id(&meta.pubkey) { - account::create_account_for_test(&Rent::free()) + account::create_account_shared_data_for_test(&Rent::free()) } else { - Account::default() + AccountSharedData::default() }) }) .collect(); @@ -1752,7 +1768,7 @@ mod tests { #[test] fn test_get_system_account_kind_system_ok() { - let system_account = Account::default(); + let system_account = AccountSharedData::default(); assert_eq!( get_system_account_kind(&system_account), Some(SystemAccountKind::System) @@ -1761,7 +1777,7 @@ mod tests { #[test] fn test_get_system_account_kind_nonce_ok() { - let nonce_account = Account::new_data( + let nonce_account = AccountSharedData::new_data( 42, &nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), @@ -1785,13 +1801,14 @@ mod tests { #[test] 
fn test_get_system_account_kind_system_owner_nonzero_nonnonce_data_fail() { - let other_data_account = Account::new_data(42, b"other", &Pubkey::default()).unwrap(); + let other_data_account = + AccountSharedData::new_data(42, b"other", &Pubkey::default()).unwrap(); assert_eq!(get_system_account_kind(&other_data_account), None); } #[test] fn test_get_system_account_kind_nonsystem_owner_with_nonce_data_fail() { - let nonce_account = Account::new_data( + let nonce_account = AccountSharedData::new_data( 42, &nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), diff --git a/runtime/src/transaction_batch.rs b/runtime/src/transaction_batch.rs index 076ed77d6d..ba4b963174 100644 --- a/runtime/src/transaction_batch.rs +++ b/runtime/src/transaction_batch.rs @@ -83,7 +83,7 @@ mod tests { let (bank, txs) = setup(); // Prepare batch without locks - let batch = bank.prepare_simulation_batch(&txs); + let batch = bank.prepare_simulation_batch(&txs[0]); assert!(batch.lock_results().iter().all(|x| x.is_ok())); // Grab locks @@ -91,7 +91,7 @@ mod tests { assert!(batch2.lock_results().iter().all(|x| x.is_ok())); // Prepare another batch without locks - let batch3 = bank.prepare_simulation_batch(&txs); + let batch3 = bank.prepare_simulation_batch(&txs[0]); assert!(batch3.lock_results().iter().all(|x| x.is_ok())); } diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 09fd86ae13..104ca85e7f 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -1,6 +1,8 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use solana_sdk::{account::Account, instruction::InstructionError, pubkey::Pubkey}; +use solana_sdk::{ + account::Account, account::AccountSharedData, instruction::InstructionError, pubkey::Pubkey, +}; use solana_vote_program::vote_state::VoteState; use std::{ borrow::Borrow, @@ -172,12 +174,27 @@ impl<'de> Deserialize<'de> for ArcVoteAccount { } } +impl From for 
ArcVoteAccount { + fn from(account: AccountSharedData) -> Self { + Self(Arc::new(VoteAccount::from(account))) + } +} impl From for ArcVoteAccount { fn from(account: Account) -> Self { Self(Arc::new(VoteAccount::from(account))) } } +impl From for VoteAccount { + fn from(account: AccountSharedData) -> Self { + Self { + account: Account::from(account), + vote_state: RwLock::new(INVALID_VOTE_STATE), + vote_state_once: Once::new(), + } + } +} + impl From for VoteAccount { fn from(account: Account) -> Self { Self { @@ -299,7 +316,7 @@ mod tests { fn new_rand_vote_account( rng: &mut R, node_pubkey: Option, - ) -> (Account, VoteState) { + ) -> (AccountSharedData, VoteState) { let vote_init = VoteInit { node_pubkey: node_pubkey.unwrap_or_else(Pubkey::new_unique), authorized_voter: Pubkey::new_unique(), @@ -314,7 +331,7 @@ mod tests { unix_timestamp: rng.gen(), }; let vote_state = VoteState::new(&vote_init, &clock); - let account = Account::new_data( + let account = AccountSharedData::new_data( rng.gen(), // lamports &VoteStateVersions::new_current(vote_state.clone()), &Pubkey::new_unique(), // owner diff --git a/runtime/store-tool/Cargo.toml b/runtime/store-tool/Cargo.toml index 2b53b8867a..96038ae144 100644 --- a/runtime/store-tool/Cargo.toml +++ b/runtime/store-tool/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-store-tool" description = "Tool to inspect append vecs" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,11 +11,11 @@ publish = false [dependencies] log = { version = "0.4.11" } -solana-logger = { path = "../../logger", version = "=1.5.19" } -solana-version = { path = "../../version" } -solana-measure = { path = "../../measure", version = "=1.5.19" } -solana-runtime = { path = "..", version = "=1.5.19" } -solana-sdk = { path = "../../sdk", version = "=1.5.19" } +solana-logger = { path = "../../logger", version = 
"=1.6.14" } +solana-version = { path = "../../version", version = "=1.6.14" } +solana-measure = { path = "../../measure", version = "=1.6.14" } +solana-runtime = { path = "..", version = "=1.6.14" } +solana-sdk = { path = "../../sdk", version = "=1.6.14" } clap = "2.33.1" [package.metadata.docs.rs] diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 7a0b50fcd5..1edef655b0 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -3,7 +3,7 @@ use rand::{thread_rng, Rng}; use rayon::prelude::*; use solana_runtime::{accounts_db::AccountsDb, accounts_index::Ancestors}; use solana_sdk::genesis_config::ClusterType; -use solana_sdk::{account::Account, clock::Slot, pubkey::Pubkey}; +use solana_sdk::{account::AccountSharedData, clock::Slot, pubkey::Pubkey}; use std::collections::HashSet; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -37,7 +37,7 @@ fn test_shrink_and_clean() { while alive_accounts.len() <= 10 { alive_accounts.push(( solana_sdk::pubkey::new_rand(), - Account::new(thread_rng().gen_range(0, 50), 0, &owner), + AccountSharedData::new(thread_rng().gen_range(0, 50), 0, &owner), )); } @@ -78,7 +78,7 @@ fn test_bad_bank_hash() { let key = Keypair::new().pubkey(); let lamports = thread_rng().gen_range(0, 100); let some_data_len = thread_rng().gen_range(0, 1000); - let account = Account::new(lamports, some_data_len, &key); + let account = AccountSharedData::new(lamports, some_data_len, &key); (key, account) }) .collect(); diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 1926414ebd..5f1d36bd1d 100644 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -11,9 +11,7 @@ use solana_sdk::{ message::Message, pubkey::Pubkey, signature::{Keypair, Signer}, - system_instruction::SystemError, sysvar::{self, stake_history::StakeHistory}, - transaction::TransactionError, }; use solana_stake_program::{ stake_instruction::{self}, @@ -23,7 +21,6 @@ use solana_vote_program::{ vote_instruction, 
vote_state::{Vote, VoteInit, VoteState, VoteStateVersions}, }; -use stake_state::MIN_DELEGATE_STAKE_AMOUNT; use std::sync::Arc; fn next_epoch(bank: &Arc) -> Arc { @@ -78,7 +75,7 @@ fn warmed_up(bank: &Bank, stake_pubkey: &Pubkey) -> bool { == stake.stake( bank.epoch(), Some( - &from_account::( + &from_account::( &bank.get_account(&sysvar::stake_history::id()).unwrap(), ) .unwrap(), @@ -93,7 +90,7 @@ fn get_staked(bank: &Bank, stake_pubkey: &Pubkey) -> u64 { .stake( bank.epoch(), Some( - &from_account::( + &from_account::( &bank.get_account(&sysvar::stake_history::id()).unwrap(), ) .unwrap(), @@ -106,15 +103,14 @@ fn get_staked(bank: &Bank, stake_pubkey: &Pubkey) -> u64 { fn test_stake_create_and_split_single_signature() { solana_logger::setup(); - let min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; let GenesisConfigInfo { genesis_config, mint_keypair: staker_keypair, .. } = create_genesis_config_with_leader( - 2 * min_stake + 100_000_000_000, + 100_000_000_000, &solana_sdk::pubkey::new_rand(), - 2 * min_stake, + 1_000_000, ); let staker_pubkey = staker_keypair.pubkey(); @@ -126,7 +122,7 @@ fn test_stake_create_and_split_single_signature() { let authorized = stake_state::Authorized::auto(&staker_pubkey); - let lamports = 2 * min_stake; + let lamports = 1_000_000; // Create stake account with seed let message = Message::new( @@ -173,22 +169,19 @@ fn test_stake_create_and_split_single_signature() { #[test] fn test_stake_create_and_split_to_existing_system_account() { - // Ensure stake-split does not allow the user to promote an existing system account into + // Ensure stake-split allows the user to promote an existing system account into // a stake account. solana_logger::setup(); - let min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; - let lamports = 2 * min_stake; - let GenesisConfigInfo { genesis_config, mint_keypair: staker_keypair, .. 
} = create_genesis_config_with_leader( - lamports + 100_000_000_000, + 100_000_000_000, &solana_sdk::pubkey::new_rand(), - lamports, + 1_000_000, ); let staker_pubkey = staker_keypair.pubkey(); @@ -200,6 +193,8 @@ fn test_stake_create_and_split_to_existing_system_account() { let authorized = stake_state::Authorized::auto(&staker_pubkey); + let lamports = 1_000_000; + // Create stake account with seed let message = Message::new( &stake_instruction::create_account_with_seed( @@ -232,7 +227,7 @@ fn test_stake_create_and_split_to_existing_system_account() { existing_lamports ); - // Verify the split fails because the account is already in use + // Verify the split succeeds with lamports in the destination account let message = Message::new( &stake_instruction::split_with_seed( &stake_address, // original @@ -244,16 +239,12 @@ fn test_stake_create_and_split_to_existing_system_account() { ), Some(&staker_keypair.pubkey()), ); - assert_eq!( - bank_client - .send_and_confirm_message(&[&staker_keypair], message) - .unwrap_err() - .unwrap(), - TransactionError::InstructionError(0, SystemError::AccountAlreadyInUse.into()) - ); + bank_client + .send_and_confirm_message(&[&staker_keypair], message) + .expect("failed to split into account with lamports"); assert_eq!( bank_client.get_balance(&split_stake_address).unwrap(), - existing_lamports + existing_lamports + lamports / 2 ); } @@ -266,16 +257,14 @@ fn test_stake_account_lifetime() { let identity_keypair = Keypair::new(); let identity_pubkey = identity_keypair.pubkey(); - let min_stake = 100 + MIN_DELEGATE_STAKE_AMOUNT + 400; - let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config_with_leader( - min_stake * 5, + 100_000_000_000, &solana_sdk::pubkey::new_rand(), - 4 * min_stake, + 1_000_000, ); let bank = Bank::new(&genesis_config); let mint_pubkey = mint_keypair.pubkey(); @@ -310,7 +299,7 @@ fn test_stake_account_lifetime() { &vote_pubkey, &authorized, &stake_state::Lockup::default(), - 4 * min_stake, + 1_000_000, ), Some(&mint_pubkey), ); @@ -322,7 +311,7 @@ fn test_stake_account_lifetime() { let account = bank.get_account(&stake_pubkey).expect("account not found"); let stake_state = account.state().expect("couldn't unpack account data"); if let StakeState::Stake(_meta, stake) = stake_state { - assert_eq!(stake.delegation.stake, min_stake * 4); + assert_eq!(stake.delegation.stake, 1_000_000); } else { panic!("wrong account type found") } @@ -346,7 +335,7 @@ fn test_stake_account_lifetime() { let account = bank.get_account(&stake_pubkey).expect("account not found"); let stake_state = account.state().expect("couldn't unpack account data"); if let StakeState::Stake(_meta, stake) = stake_state { - assert_eq!(stake.delegation.stake, 4 * min_stake); + assert_eq!(stake.delegation.stake, 1_000_000); } else { panic!("wrong account type found") } @@ -384,7 +373,7 @@ fn test_stake_account_lifetime() { let staked = get_staked(&bank, &stake_pubkey); let lamports = bank.get_balance(&stake_pubkey); assert!(staked > pre_staked); - assert!(lamports > 4 * min_stake); + assert!(lamports > 1_000_000); // split the stake let split_stake_keypair = Keypair::new(); @@ -421,15 +410,14 @@ fn test_stake_account_lifetime() { .is_ok()); let split_staked = get_staked(&bank, &split_stake_pubkey); - assert!(split_staked > 0); - let unstaked_amount = dbg!(lamports / 2 - split_staked); + // Test that we cannot withdraw above what's staked let message = Message::new( &[stake_instruction::withdraw( &split_stake_pubkey, &stake_pubkey, &solana_sdk::pubkey::new_rand(), - unstaked_amount + 1, + lamports / 2 - split_staked + 1, None, )], 
Some(&mint_pubkey), @@ -444,7 +432,7 @@ fn test_stake_account_lifetime() { // assert we're still cooling down let split_staked = get_staked(&bank, &split_stake_pubkey); assert!(split_staked > 0); - let unstaked_amount = lamports / 2 - split_staked; + // withdrawal in cooldown let message = Message::new( &[stake_instruction::withdraw( @@ -461,39 +449,21 @@ fn test_stake_account_lifetime() { .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) .is_err()); - assert!(unstaked_amount > lamports / 2 - min_stake); - // And we can't left < MIN_DELEGATE_STAKE_AMOUNT on account - let message = Message::new( - &[stake_instruction::withdraw( - &split_stake_pubkey, - &stake_pubkey, - &solana_sdk::pubkey::new_rand(), - lamports / 2 - MIN_DELEGATE_STAKE_AMOUNT + 1, - None, - )], - Some(&mint_pubkey), - ); - - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_err()); - - // But we can withdraw all unstaked that is less than min_stake + // but we can withdraw unstaked let message = Message::new( &[stake_instruction::withdraw( &split_stake_pubkey, &stake_pubkey, &solana_sdk::pubkey::new_rand(), - lamports / 2 - min_stake, + lamports / 2 - split_staked, None, )], Some(&mint_pubkey), ); - // assert we can withdraw unstaked tokens - bank_client + assert!(bank_client .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .unwrap(); + .is_ok()); // finish cooldown loop { @@ -504,7 +474,6 @@ fn test_stake_account_lifetime() { } let bank_client = BankClient::new_shared(&bank); - let split_staked = bank.get_balance(&split_stake_pubkey); // Test that we can withdraw everything else out of the split let message = Message::new( &[stake_instruction::withdraw( @@ -532,15 +501,14 @@ fn test_create_stake_account_from_seed() { let identity_keypair = Keypair::new(); let identity_pubkey = identity_keypair.pubkey(); - let min_stake = MIN_DELEGATE_STAKE_AMOUNT + 400; let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config_with_leader( - min_stake * 2, + 100_000_000_000, &solana_sdk::pubkey::new_rand(), - min_stake, + 1_000_000, ); let bank = Bank::new(&genesis_config); let mint_pubkey = mint_keypair.pubkey(); @@ -581,7 +549,7 @@ fn test_create_stake_account_from_seed() { &vote_pubkey, &authorized, &stake_state::Lockup::default(), - min_stake, + 1_000_000, ), Some(&mint_pubkey), ); @@ -593,7 +561,7 @@ fn test_create_stake_account_from_seed() { let account = bank.get_account(&stake_pubkey).expect("account not found"); let stake_state = account.state().expect("couldn't unpack account data"); if let StakeState::Stake(_meta, stake) = stake_state { - assert_eq!(stake.delegation.stake, min_stake); + assert_eq!(stake.delegation.stake, 1_000_000); } else { panic!("wrong account type found") } diff --git a/scripts/Cargo.toml b/scripts/Cargo.toml index 3e3115448c..01944a758c 100644 --- a/scripts/Cargo.toml +++ b/scripts/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-scripts" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,7 +11,7 @@ publish = false [dependencies] csv = "1.1" -serde = { version = "1.0.118", features = ["derive"] } +serde = { version = "1.0.122", features = ["derive"] } [[bin]] name = "solana-csv-to-validator-infos" diff --git a/scripts/build-downstream-projects.sh b/scripts/build-downstream-projects.sh index a4bb85a44b..8d039604eb 100755 --- a/scripts/build-downstream-projects.sh +++ b/scripts/build-downstream-projects.sh @@ -101,5 +101,5 @@ EOF _ example_helloworld -_ spl +#_ spl _ serum_dex diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 148b5e9fca..0e02fcca4b 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -26,17 +26,14 @@ maybeReleaseFlag=--release validatorOnly= while [[ -n $1 ]]; do - if 
[[ ${1:0:1} = - ]]; then - if [[ $1 = --debug ]]; then - maybeReleaseFlag= - buildVariant=debug - shift - else - usage "Unknown option: $1" - fi - elif [[ ${1:0:1} = \+ ]]; then - maybeRustVersion=$1 - shift + if [[ ${1:0:1} = - ]]; then + if [[ $1 = --debug ]]; then + maybeReleaseFlag= + buildVariant=debug + shift + elif [[ $1 = --validator-only ]]; then + validatorOnly=true + shift else installDir=$1 shift @@ -105,11 +102,15 @@ done mkdir -p "$installDir/bin" ( - set -x - # shellcheck disable=SC2086 # Don't want to double quote $rust_version - "$cargo" $maybeRustVersion build $maybeReleaseFlag "${binArgs[@]}" + set -x + # shellcheck disable=SC2086 # Don't want to double quote $rust_version + "$cargo" $maybeRustVersion build $maybeReleaseFlag "${binArgs[@]}" + + # Exclude `spl-token` binary for net.sh builds + if [[ -z "$validatorOnly" ]]; then # shellcheck disable=SC2086 # Don't want to double quote $rust_version "$cargo" $maybeRustVersion install spl-token-cli --root "$installDir" + fi ) for bin in "${BINS[@]}"; do diff --git a/scripts/increment-cargo-version.sh b/scripts/increment-cargo-version.sh index da4ec36dce..c649877df0 100755 --- a/scripts/increment-cargo-version.sh +++ b/scripts/increment-cargo-version.sh @@ -19,9 +19,11 @@ source ci/semver_bash/semver.sh source scripts/read-cargo-variable.sh ignores=( - .cache - .cargo - target + .cache + .cargo + target + web3.js/examples + node_modules ) not_paths=() @@ -108,11 +110,11 @@ esac # in dirty working trees. Gate after arg parsing to prevent breaking the # `check` subcommand. ( - set +e - if ! git diff --exit-code; then - echo -e "\nError: Working tree is dirty. Commit or discard changes before bumping version." 1>&2 - exit 1 - fi + set +e + if ! git diff --exit-code; then + echo -e "\nError: Working tree is dirty. Commit or discard changes before bumping version." 
1>&2 + exit 1 + fi ) newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL" @@ -121,8 +123,10 @@ newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL" for Cargo_toml in "${Cargo_tomls[@]}"; do # Set new crate version ( - set -x - sed -i "$Cargo_toml" -e "0,/^version =/{s/^version = \"[^\"]*\"$/version = \"$newVersion\"/}" + set -x + sed -i "$Cargo_toml" -e " + s/^$crate = { *path *= *\"\([^\"]*\)\" *, *version *= *\"[^\"]*\"\(.*\)} *\$/$crate = \{ path = \"\1\", version = \"=$newVersion\"\2\}/ + " ) # Fix up the version references to other internal crates diff --git a/scripts/sed-i-all-rs-files-for-rust-analyzer.sh b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh new file mode 100755 index 0000000000..3aa8206ee2 --- /dev/null +++ b/scripts/sed-i-all-rs-files-for-rust-analyzer.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -e + +# rust-analyzer doesn't support hiding noisy test calls in the call hierarchy from tests/benches +# so, here's some wild hack from ryoqun! + +if [[ $1 = "doit" ]]; then + # it's true that we put true just for truely-aligned lines + # shellcheck disable=SC2046 # our rust files are sanely named with no need to escape + true && + sed -i -e 's/#\[cfg(test)\]/#[cfg(escaped-cfg-test)]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[bench\]/#[cfg(escaped-bench)]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[test\]/#[cfg(escaped-test)]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[tokio::test\]/#[cfg(escaped-tokio-test)]/g' $(git ls-files :**.rs :^**/build.rs) +elif [[ $1 = "undoit" ]]; then + # shellcheck disable=SC2046 # our rust files are sanely named with no need to escape + true && + sed -i -e 's/#\[cfg(escaped-cfg-test)\]/#[cfg(test)]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[cfg(escaped-bench)\]/#[bench]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[cfg(escaped-test)\]/#[test]/g' $(git ls-files :**.rs :^**/build.rs) && + sed -i -e 's/#\[cfg(escaped-tokio-test)\]/#[tokio::test]/g' $(git 
ls-files :**.rs :^**/build.rs) +else + echo "usage: $0 [doit|undoit]" > /dev/stderr + exit 1 +fi diff --git a/scripts/set-log-filter.sh b/scripts/set-log-filter.sh deleted file mode 100755 index bee721e019..0000000000 --- a/scripts/set-log-filter.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# -# Reconfigures the log filter on a validator using the current RUST_LOG value -# - -if [[ -n $1 ]]; then - url=$1 -else - # Default to the local node - url=http://127.0.0.1:8899 -fi - -if [[ -z $RUST_LOG ]]; then - echo "RUST_LOG not defined" - exit 1 -fi - -set -x -exec curl $url -X POST -H "Content-Type: application/json" \ - -d " - { - \"jsonrpc\": \"2.0\", - \"id\": 1, - \"method\": \"setLogFilter\", - \"params\": [\"$RUST_LOG\"] - } - " diff --git a/scripts/solana-install-deploy.sh b/scripts/solana-install-deploy.sh index 40b88a95fb..ea77ca34bc 100755 --- a/scripts/solana-install-deploy.sh +++ b/scripts/solana-install-deploy.sh @@ -46,7 +46,7 @@ esac case $URL in stable) - URL=http://devnet.solana.com + URL=http://api.devnet.solana.com ;; localhost) URL=http://localhost:8899 @@ -57,7 +57,7 @@ esac case $TAG in edge|beta) - DOWNLOAD_URL=http://release.solana.com/"$TAG"/solana-release-$TARGET.tar.bz2 + DOWNLOAD_URL=https://release.solana.com/"$TAG"/solana-release-$TARGET.tar.bz2 ;; *) DOWNLOAD_URL=https://github.com/solana-labs/solana/releases/download/"$TAG"/solana-release-$TARGET.tar.bz2 diff --git a/scripts/wallet-sanity.sh b/scripts/wallet-sanity.sh index e1e6a41f03..b2bbbc0592 100755 --- a/scripts/wallet-sanity.sh +++ b/scripts/wallet-sanity.sh @@ -21,7 +21,7 @@ node_readiness=false timeout=60 while [[ $timeout -gt 0 ]]; do set +e - output=$($solana_cli "${args[@]}" transaction-count --commitment max) + output=$($solana_cli "${args[@]}" transaction-count --commitment finalized) rc=$? 
set -e if [[ $rc -eq 0 && -n $output ]]; then diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 5583ac9827..775dd31974 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-sdk" -version = "1.5.19" +version = "1.6.14" description = "Solana SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,20 +19,21 @@ default = [ "full", ] # functionality that is not compatible or needed for on-chain programs full = [ - "assert_matches", - "byteorder", - "chrono", - "generic-array", - "memmap2", - "rand", - "rand_chacha", - "serde_json", - "ed25519-dalek", - "solana-logger", - "solana-crate-features", - "libsecp256k1", - "sha3", - "digest", + "assert_matches", + "byteorder", + "chrono", + "generic-array", + "memmap2", + "rand", + "rand_chacha", + "serde_json", + "ed25519-dalek", + "ed25519-dalek-bip32", + "solana-logger", + "solana-crate-features", + "libsecp256k1", + "sha3", + "digest", ] [dependencies] @@ -43,35 +44,40 @@ bv = { version = "0.11.1", features = ["serde"] } byteorder = { version = "1.3.4", optional = true } chrono = { version = "0.4", optional = true } curve25519-dalek = { version = "2.1.0", optional = true } +derivation-path = { version = "0.1.3", default-features = false } +digest = { version = "0.9.0", optional = true } +ed25519-dalek = { version = "=1.0.1", optional = true } +ed25519-dalek-bip32 = { version = "0.1.1", optional = true } generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"], optional = true } hex = "0.4.2" hmac = "0.10.1" itertools = "0.9.0" lazy_static = "1.4.0" +libsecp256k1 = { version = "0.3.5", optional = true } log = "0.4.11" memmap2 = { version = "0.1.0", optional = true } num-derive = "0.3" num-traits = "0.2" pbkdf2 = { version = "0.6.0", default-features = false } +qstring = "0.7.2" rand = { version = "0.7.0", optional = true } rand_chacha = { version = "0.2.2", optional = true } -serde = "1.0.118" 
+rand_core = "0.6.2" +rustversion = "1.0.4" +serde = "1.0.122" serde_bytes = "0.11" serde_derive = "1.0.103" serde_json = { version = "1.0.56", optional = true } sha2 = "0.9.2" -thiserror = "1.0" -ed25519-dalek = { version = "=1.0.0-pre.4", optional = true } -solana-crate-features = { path = "../crate-features", version = "=1.5.19", optional = true } -solana-logger = { path = "../logger", version = "=1.5.19", optional = true } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.5.19" } -solana-program = { path = "program", version = "=1.5.19" } -solana-sdk-macro = { path = "macro", version = "=1.5.19" } -rustversion = "1.0.4" -libsecp256k1 = { version = "0.3.5", optional = true } sha3 = { version = "0.9.1", optional = true } -digest = { version = "0.9.0", optional = true } +solana-crate-features = { path = "../crate-features", version = "=1.6.14", optional = true } +solana-logger = { path = "../logger", version = "=1.6.14", optional = true } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.14" } +solana-program = { path = "program", version = "=1.6.14" } +solana-sdk-macro = { path = "macro", version = "=1.6.14" } +thiserror = "1.0" +uriparse = "0.6.3" evm-state = { path = "../evm-utils/evm-state", version = "0.1" } evm-rpc = { path = "../evm-utils/evm-rpc", version = "0.1" } diff --git a/sdk/benches/serialize_instructions.rs b/sdk/benches/serialize_instructions.rs index 1dde1dd4da..1367c0474b 100644 --- a/sdk/benches/serialize_instructions.rs +++ b/sdk/benches/serialize_instructions.rs @@ -10,7 +10,7 @@ use test::Bencher; fn make_instructions() -> Vec { let meta = AccountMeta::new(pubkey::new_rand(), false); - let inst = Instruction::new(pubkey::new_rand(), &[0; 10], vec![meta; 4]); + let inst = Instruction::new_with_bincode(pubkey::new_rand(), &[0; 10], vec![meta; 4]); vec![inst; 4] 
} @@ -27,7 +27,9 @@ fn bench_manual_instruction_serialize(b: &mut Bencher) { let instructions = make_instructions(); let message = Message::new(&instructions, None); b.iter(|| { - test::black_box(message.serialize_instructions()); + test::black_box(message.serialize_instructions( + true, // demote_sysvar_write_locks + )); }); } @@ -44,7 +46,9 @@ fn bench_bincode_instruction_deserialize(b: &mut Bencher) { fn bench_manual_instruction_deserialize(b: &mut Bencher) { let instructions = make_instructions(); let message = Message::new(&instructions, None); - let serialized = message.serialize_instructions(); + let serialized = message.serialize_instructions( + true, // demote_sysvar_write_locks + ); b.iter(|| { for i in 0..instructions.len() { test::black_box(instructions::load_instruction_at(i, &serialized).unwrap()); @@ -56,7 +60,9 @@ fn bench_manual_instruction_deserialize(b: &mut Bencher) { fn bench_manual_instruction_deserialize_single(b: &mut Bencher) { let instructions = make_instructions(); let message = Message::new(&instructions, None); - let serialized = message.serialize_instructions(); + let serialized = message.serialize_instructions( + true, // demote_sysvar_write_locks + ); b.iter(|| { test::black_box(instructions::load_instruction_at(3, &serialized).unwrap()); }); diff --git a/sdk/benches/slot_hashes.rs b/sdk/benches/slot_hashes.rs index c2ba15df72..0167d8d777 100644 --- a/sdk/benches/slot_hashes.rs +++ b/sdk/benches/slot_hashes.rs @@ -16,6 +16,6 @@ fn bench_to_from_account(b: &mut Bencher) { } b.iter(|| { let account = create_account_for_test(&slot_hashes); - slot_hashes = from_account::(&account).unwrap(); + slot_hashes = from_account::(&account).unwrap(); }); } diff --git a/sdk/benches/slot_history.rs b/sdk/benches/slot_history.rs index c6cb4255eb..9da3cf69d0 100644 --- a/sdk/benches/slot_history.rs +++ b/sdk/benches/slot_history.rs @@ -13,7 +13,7 @@ fn bench_to_from_account(b: &mut Bencher) { b.iter(|| { let account = 
create_account_for_test(&slot_history); - slot_history = from_account::(&account).unwrap(); + slot_history = from_account::(&account).unwrap(); }); } diff --git a/sdk/bpf/.gitignore b/sdk/bpf/.gitignore index deda848aab..beaddb4ca6 100644 --- a/sdk/bpf/.gitignore +++ b/sdk/bpf/.gitignore @@ -1,7 +1,8 @@ /dependencies/criterion* /dependencies/hashbrown* /dependencies/llvm-native* -/dependencies/rust-bpf* +/dependencies/rust-bpf-sysroot* +/dependencies/bpf-tools* /dependencies/xargo* /dependencies/bin* /dependencies/.crates.toml diff --git a/sdk/bpf/c/bpf.ld b/sdk/bpf/c/bpf.ld index 62a7170662..227589c0c5 100644 --- a/sdk/bpf/c/bpf.ld +++ b/sdk/bpf/c/bpf.ld @@ -8,9 +8,10 @@ PHDRS SECTIONS { . = SIZEOF_HEADERS; - .text : { *(.text) } :text - .rodata : { *(.rodata) } :rodata + .text : { *(.text*) } :text + .rodata : { *(.rodata*) } :rodata .dynamic : { *(.dynamic) } :dynamic + .data.rel.ro : { *(.data.rel.ro*) } :dynamic .dynsym : { *(.dynsym) } :dynamic .dynstr : { *(.dynstr) } :dynamic .gnu.hash : { *(.gnu.hash) } :dynamic diff --git a/sdk/bpf/c/bpf.mk b/sdk/bpf/c/bpf.mk index 1709e2172f..a006ae77c1 100644 --- a/sdk/bpf/c/bpf.mk +++ b/sdk/bpf/c/bpf.mk @@ -14,20 +14,15 @@ TEST_PREFIX ?= test_ OUT_DIR ?= ./out OS := $(shell uname) -ifeq ($(DOCKER),1) -$(warning DOCKER=1 is experimential and may not work as advertised) -LLVM_DIR = $(LOCAL_PATH)../dependencies/llvm-docker/ -LLVM_SYSTEM_INC_DIRS := /usr/local/lib/clang/8.0.0/include -else -LLVM_DIR = $(LOCAL_PATH)../dependencies/llvm-native/ -LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/8.0.0/include -endif +LLVM_DIR = $(LOCAL_PATH)../dependencies/bpf-tools/llvm +LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/11.0.1/include ifdef LLVM_DIR CC := $(LLVM_DIR)/bin/clang CXX := $(LLVM_DIR)/bin/clang++ LLD := $(LLVM_DIR)/bin/ld.lld OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump +READ_ELF := $(LLVM_DIR)/bin/llvm-readelf endif SYSTEM_INC_DIRS := \ @@ -68,9 +63,11 @@ BPF_LLD_FLAGS := \ --entry entrypoint \ OBJ_DUMP_FLAGS := \ - -color \ 
- -source \ - -disassemble \ + --source \ + --disassemble \ + +READ_ELF_FLAGS := \ + --all \ TESTFRAMEWORK_RPATH := $(abspath $(LOCAL_PATH)../dependencies/criterion/lib) TESTFRAMEWORK_FLAGS := \ @@ -126,7 +123,8 @@ help: @echo ' - make all - Build all the programs and tests, run the tests' @echo ' - make programs - Build all the programs' @echo ' - make tests - Build and run all tests' - @echo ' - make dump_ - Dumps the contents of the program to stdout' + @echo ' - make dump_ - Dump the contents of the program to stdout' + @echo ' - make readelf_ - Display information about the ELF binary' @echo ' - make - Build a single program by name' @echo ' - make - Build and run a single test by name' @echo '' @@ -137,7 +135,7 @@ help: $(foreach name, $(TEST_NAMES), @echo ' - $(name)'$(\n)) @echo '' @echo 'Example:' - @echo ' - Assuming a programed named foo (src/foo/foo.c)' + @echo ' - Assuming a program named foo (src/foo/foo.c)' @echo ' - make foo' @echo ' - make dump_foo' @echo '' @@ -150,7 +148,7 @@ $1: $2 endef define CC_RULE -$1: $2 +$1: $2 @echo "[cxx] $1 ($2)" $(_@)mkdir -p $(dir $1) $(_@)$(CXX) $(BPF_CXX_FLAGS) -o $1 -c $2 -MD -MF $(1:.o=.d) @@ -172,7 +170,7 @@ ifeq (,$(wildcard $(subst .so,-keypair.json,$1))) $(_@)velas-keygen new --no-passphrase --silent -o $(subst .so,-keypair.json,$1) endif @echo To deploy this program: - @echo $$$$ solana program deploy $(realpath $1) + @echo $$$$ solana program deploy $(abspath $1) endef define TEST_C_RULE @@ -251,5 +249,8 @@ tests: $(TEST_NAMES) dump_%: % $(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) +readelf_%: % + $(_@)$(READ_ELF) $(READ_ELF_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) + clean: rm -rf $(OUT_DIR) diff --git a/sdk/bpf/c/inc/solana_sdk.h b/sdk/bpf/c/inc/solana_sdk.h index 2e4712906b..bb24e758e4 100644 --- a/sdk/bpf/c/inc/solana_sdk.h +++ b/sdk/bpf/c/inc/solana_sdk.h @@ -345,6 +345,8 @@ static bool sol_deserialize( input += MAX_PERMITTED_DATA_INCREASE; input = 
(uint8_t*)(((uint64_t)input + 8 - 1) & ~(8 - 1)); // padding input += sizeof(uint64_t); + } else { + input += 7; // padding } continue; } @@ -431,10 +433,28 @@ typedef struct { * @param bytes_len Number of byte arrays * @param result 32 byte array to hold the result */ -static uint64_t sol_sha256( +uint64_t sol_sha256( const SolBytes *bytes, int bytes_len, - const uint8_t *result + uint8_t *result +); + +/** + * Length of a Keccak hash result + */ +#define KECCAK_RESULT_LENGTH 32 + +/** + * Keccak + * + * @param bytes Array of byte arrays + * @param bytes_len Number of byte arrays + * @param result 32 byte array to hold the result + */ +uint64_t sol_keccak256( + const SolBytes *bytes, + int bytes_len, + uint8_t *result ); /** @@ -482,11 +502,11 @@ typedef struct { * @param program_id Program id of the signer * @param program_address Program address created, filled on return */ -static uint64_t sol_create_program_address( +uint64_t sol_create_program_address( const SolSignerSeed *seeds, int seeds_len, const SolPubkey *program_id, - const SolPubkey *program_address + SolPubkey *program_address ); /** @@ -498,12 +518,12 @@ static uint64_t sol_create_program_address( * @param program_address Program address created, filled on return * @param bump_seed Bump seed required to create a valid program address */ -static uint64_t sol_try_find_program_address( +uint64_t sol_try_find_program_address( const SolSignerSeed *seeds, int seeds_len, const SolPubkey *program_id, - const SolPubkey *program_address, - const uint8_t *bump_seed + SolPubkey *program_address, + uint8_t *bump_seed ); /** @@ -511,6 +531,17 @@ static uint64_t sol_try_find_program_address( * * @{ */ +/** + * Internal cross-program invocation function + */ +uint64_t sol_invoke_signed_c( + const SolInstruction *instruction, + const SolAccountInfo *account_infos, + int account_infos_len, + const SolSignerSeeds *signers_seeds, + int signers_seeds_len +); + /** * Invoke another program and sign for some of the keys * 
@@ -527,14 +558,6 @@ static uint64_t sol_invoke_signed( const SolSignerSeeds *signers_seeds, int signers_seeds_len ) { - uint64_t sol_invoke_signed_c( - const SolInstruction *instruction, - const SolAccountInfo *account_infos, - int account_infos_len, - const SolSignerSeeds *signers_seeds, - int signers_seeds_len - ); - return sol_invoke_signed_c( instruction, account_infos, diff --git a/sdk/bpf/dependencies/llvm-docker/bin/clang b/sdk/bpf/dependencies/llvm-docker/bin/clang deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/clang +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/bin/clang++ b/sdk/bpf/dependencies/llvm-docker/bin/clang++ deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/clang++ +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/bin/ld.lld b/sdk/bpf/dependencies/llvm-docker/bin/ld.lld deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/ld.lld +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/bin/llc b/sdk/bpf/dependencies/llvm-docker/bin/llc 
deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/llc +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/bin/llvm-objcopy b/sdk/bpf/dependencies/llvm-docker/bin/llvm-objcopy deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/llvm-objcopy +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/bin/llvm-objdump b/sdk/bpf/dependencies/llvm-docker/bin/llvm-objdump deleted file mode 100755 index 03f69ca1aa..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/bin/llvm-objdump +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - "$PROGRAM" "$@" diff --git a/sdk/bpf/dependencies/llvm-docker/generate.sh b/sdk/bpf/dependencies/llvm-docker/generate.sh deleted file mode 100755 index 0ec8023277..0000000000 --- a/sdk/bpf/dependencies/llvm-docker/generate.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -read -r -d '' SCRIPT << 'EOM' -#!/usr/bin/env bash -set -e -PROGRAM=$(basename "$0") -SDKROOT="$(cd "$(dirname "$0")"/../..; pwd -P)" -[[ -z $V ]] || set -x -exec docker run \ - --workdir "$PWD" \ - --volume "$PWD:$PWD" \ - --volume "$SDKROOT:$SDKROOT" \ - --rm solanalabs/llvm \ - 
"$PROGRAM" "$@" -EOM - -for program in clang clang++ llc ld.lld llvm-objdump llvm-objcopy; do - echo "$SCRIPT" > bin/$program -done diff --git a/sdk/bpf/env.sh b/sdk/bpf/env.sh index 8217277694..85bb596c15 100644 --- a/sdk/bpf/env.sh +++ b/sdk/bpf/env.sh @@ -10,10 +10,10 @@ fi "$bpf_sdk"/scripts/install.sh # Use the SDK's version of llvm to build the compiler-builtins for BPF -export CC="$bpf_sdk/dependencies/llvm-native/bin/clang" -export AR="$bpf_sdk/dependencies/llvm-native/bin/llvm-ar" -export OBJDUMP="$bpf_sdk/dependencies/llvm-native/bin/llvm-objdump" -export OBJCOPY="$bpf_sdk/dependencies/llvm-native/bin/llvm-objcopy" +export CC="$bpf_sdk/dependencies/bpf-tools/llvm/bin/clang" +export AR="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-ar" +export OBJDUMP="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-objdump" +export OBJCOPY="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-objcopy" # Use the SDK's version of Rust to build for BPF export RUSTUP_TOOLCHAIN=bpf @@ -24,16 +24,6 @@ export RUSTFLAGS=" -C link-arg=-T$bpf_sdk/rust/bpf.ld \ -C link-arg=--Bdynamic \ -C link-arg=-shared \ + -C link-arg=--threads=1 \ -C link-arg=--entry=entrypoint \ - -C link-arg=-no-threads \ - -C linker=$bpf_sdk/dependencies/llvm-native/bin/ld.lld" - -# CARGO may be set if run from within cargo, causing -# incompatibilities between cargo and xargo versions -unset CARGO - -export XARGO="$bpf_sdk"/dependencies/bin/xargo -export XARGO_TARGET=bpfel-unknown-unknown -export XARGO_HOME="$bpf_sdk/dependencies/xargo" -export XARGO_RUST_SRC="$bpf_sdk/dependencies/rust-bpf-sysroot/src" -export RUST_COMPILER_RT_ROOT="$bpf_sdk/dependencies/rust-bpf-sysroot/src/compiler-rt" + -C linker=$bpf_sdk/dependencies/bpf-tools/llvm/bin/ld.lld" diff --git a/sdk/bpf/rust/bpf.ld b/sdk/bpf/rust/bpf.ld index 62a7170662..b57c196a10 100644 --- a/sdk/bpf/rust/bpf.ld +++ b/sdk/bpf/rust/bpf.ld @@ -1,6 +1,6 @@ PHDRS { - text PT_LOAD ; + text PT_LOAD ; rodata PT_LOAD ; dynamic PT_DYNAMIC ; } @@ -8,12 +8,8 @@ PHDRS SECTIONS 
{ . = SIZEOF_HEADERS; - .text : { *(.text) } :text - .rodata : { *(.rodata) } :rodata + .text : { *(.text*) } :text + .rodata : { *(.rodata*) } :rodata + .data.rel.ro : { *(.data.rel.ro*) } :rodata .dynamic : { *(.dynamic) } :dynamic - .dynsym : { *(.dynsym) } :dynamic - .dynstr : { *(.dynstr) } :dynamic - .gnu.hash : { *(.gnu.hash) } :dynamic - .rel.dyn : { *(.rel.dyn) } :dynamic - .hash : { *(.hash) } :dynamic } diff --git a/sdk/bpf/rust/xargo-build.sh b/sdk/bpf/rust/xargo-build.sh deleted file mode 100755 index 0d86044422..0000000000 --- a/sdk/bpf/rust/xargo-build.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -bpf_sdk=$(cd "$(dirname "$0")/.." && pwd) -# shellcheck source=sdk/bpf/env.sh -source "$bpf_sdk"/env.sh - -set -e -( - while true; do - if [[ -r Xargo.toml ]]; then - break; - fi - if [[ $PWD = / ]]; then - cat <&2 exit 1 fi if [[ ! -r $so ]]; then - echo "Error: File not found or readable: $so" + echo "Error: File not found or readable: $so" >&2 exit 1 fi if ! command -v rustfilt > /dev/null; then - echo "Error: rustfilt not found. It can be installed by running: cargo install rustfilt" - exit 1 -fi -if ! command -v readelf > /dev/null; then - if [[ $(uname) = Darwin ]]; then - echo "Error: readelf not found. It can be installed by running: brew install binutils" - else - echo "Error: readelf not found." - fi + echo "Error: rustfilt not found. It can be installed by running: cargo install rustfilt" >&2 exit 1 fi @@ -39,15 +31,16 @@ dump_mangled=$dump.mangled ( set -ex ls -la "$so" > "$dump_mangled" - readelf -aW "$so" >>"$dump_mangled" - "$OBJDUMP" -print-imm-hex --source --disassemble "$so" >> "$dump_mangled" + "$bpf_sdk"/dependencies/bpf-tools/llvm/bin/llvm-readelf -aW "$so" >>"$dump_mangled" + "$OBJDUMP" --print-imm-hex --source --disassemble "$so" >> "$dump_mangled" sed s/://g < "$dump_mangled" | rustfilt > "$dump" ) rm -f "$dump_mangled" if [[ ! 
-f "$dump" ]]; then - echo "Error: Failed to create $dump" + echo "Error: Failed to create $dump" >&2 exit 1 fi -echo "Wrote $dump" +echo >&2 +echo "Wrote $dump" >&2 diff --git a/sdk/bpf/scripts/install.sh b/sdk/bpf/scripts/install.sh index cd58a21942..684c492850 100755 --- a/sdk/bpf/scripts/install.sh +++ b/sdk/bpf/scripts/install.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash +mkdir -p "$(dirname "$0")"/../dependencies cd "$(dirname "$0")"/../dependencies if [[ "$(uname)" = Darwin ]]; then @@ -39,24 +40,6 @@ download() { return 1 } -clone() { - declare url=$1 - declare version=$2 - - rm -rf temp - if ( - set -x - git clone --recursive --depth 1 --single-branch --branch "$version" "$url" temp - ); then - ( - shopt -s dotglob nullglob - mv temp/* . - ) - return 0 - fi - return 1 -} - get() { declare version=$1 declare dirname=$2 @@ -84,26 +67,6 @@ get() { return 1 } -# Install xargo -version=0.3.22 -if [[ ! -e xargo-$version.md ]] || [[ ! -x bin/xargo ]]; then - ( - args=() - # shellcheck disable=SC2154 - if [[ -n $rust_stable ]]; then - args+=(+"$rust_stable") - fi - args+=(install xargo --version "$version" --root .) - set -ex - cargo "${args[@]}" - ) - exitcode=$? - if [[ $exitcode -ne 0 ]]; then - exit 1 - fi - ./bin/xargo --version >xargo-$version.md 2>&1 -fi - # Install Criterion if [[ $machine == "linux" ]]; then version=v2.3.3 @@ -128,72 +91,32 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then touch criterion-$version.md fi -# Install LLVM -version=v0.0.15 -if [[ ! -e llvm-native-$version.md || ! -e llvm-native ]]; then - ( - set -e - rm -rf llvm-native* - rm -rf xargo - job="download \ - https://github.com/solana-labs/llvm-builder/releases/download \ - $version \ - solana-llvm-$machine.tar.bz2 \ - llvm-native" - get $version llvm-native "$job" - ) - exitcode=$? - if [[ $exitcode -ne 0 ]]; then - exit 1 - fi - touch llvm-native-$version.md -fi - # Install Rust-BPF -version=v0.2.5 -if [[ ! -e rust-bpf-$machine-$version.md || ! 
-e rust-bpf-$machine ]]; then +version=v1.5 +if [[ ! -e bpf-tools-$version.md || ! -e bpf-tools ]]; then ( set -e - rm -rf rust-bpf-$machine* + rm -rf bpf-tools* rm -rf xargo job="download \ - https://github.com/solana-labs/rust-bpf-builder/releases/download \ + https://github.com/solana-labs/bpf-tools/releases/download \ $version \ - solana-rust-bpf-$machine.tar.bz2 \ - rust-bpf-$machine" - get $version rust-bpf-$machine "$job" - ) - exitcode=$? - if [[ $exitcode -ne 0 ]]; then - exit 1 - fi - touch rust-bpf-$machine-$version.md -fi -set -ex -./rust-bpf-$machine/bin/rustc --version -./rust-bpf-$machine/bin/rustc --print sysroot -set +e -rustup toolchain uninstall bpf -set -e -rustup toolchain link bpf rust-bpf-$machine - -# Install Rust-BPF Sysroot sources -version=v0.14 -if [[ ! -e rust-bpf-sysroot-$version.md || ! -e rust-bpf-sysroot ]]; then - ( - set -e - rm -rf rust-bpf-sysroot* - rm -rf xargo - job="clone \ - https://github.com/solana-labs/rust-bpf-sysroot.git \ - $version" - get $version rust-bpf-sysroot "$job" + solana-bpf-tools-$machine.tar.bz2 \ + bpf-tools" + get $version bpf-tools "$job" ) exitcode=$? if [[ $exitcode -ne 0 ]]; then exit 1 fi - touch rust-bpf-sysroot-$version.md + touch bpf-tools-$version.md + set -ex + ./bpf-tools/rust/bin/rustc --version + ./bpf-tools/rust/bin/rustc --print sysroot + set +e + rustup toolchain uninstall bpf + set -e + rustup toolchain link bpf bpf-tools/rust fi exit 0 diff --git a/sdk/bpf/scripts/strip.sh b/sdk/bpf/scripts/strip.sh index e80459829e..eafa4fd822 100755 --- a/sdk/bpf/scripts/strip.sh +++ b/sdk/bpf/scripts/strip.sh @@ -20,4 +20,4 @@ out_dir=$(dirname "$so_stripped") if [[ ! 
-d $out_dir ]]; then mkdir -p "$out_dir" fi -"$bpf_sdk"/dependencies/llvm-native/bin/llvm-objcopy --strip-all "$so" "$so_stripped" +"$bpf_sdk"/dependencies/bpf-tools/llvm/bin/llvm-objcopy --strip-all "$so" "$so_stripped" diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml index 5a55bf3832..e571a20b8b 100644 --- a/sdk/cargo-build-bpf/Cargo.toml +++ b/sdk/cargo-build-bpf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-cargo-build-bpf" -version = "1.5.19" +version = "1.6.14" description = "Compile a local package and all of its dependencies using the Solana BPF SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,9 +10,12 @@ edition = "2018" publish = false [dependencies] +bzip2 = "0.3.3" clap = "2.33.3" cargo_metadata = "0.12.0" -solana-sdk = { path = "..", version = "=1.5.19" } +solana-download-utils = { path = "../../download-utils", version = "=1.6.14" } +solana-sdk = { path = "..", version = "=1.6.14" } +tar = "0.4.28" [features] program = [] diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 181b42d78d..9ea688a9b7 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -1,16 +1,20 @@ use { + bzip2::bufread::BzDecoder, clap::{ crate_description, crate_name, crate_version, value_t, value_t_or_exit, values_t, App, Arg, }, + solana_download_utils::download_file, solana_sdk::signature::{write_keypair_file, Keypair}, std::{ env, ffi::OsStr, - fs, + fs::{self, File}, + io::BufReader, path::{Path, PathBuf}, process::exit, - process::Command, + process::{Command, Stdio}, }, + tar::Archive, }; struct Config { @@ -44,7 +48,7 @@ impl Default for Config { } } -fn spawn(program: &Path, args: I) +fn spawn(program: &Path, args: I) -> String where I: IntoIterator, S: AsRef, @@ -56,18 +60,125 @@ where } println!(); - let mut child = Command::new(program) + let child = Command::new(program) .args(&args) + .stdout(Stdio::piped()) .spawn() 
.unwrap_or_else(|err| { eprintln!("Failed to execute {}: {}", program.display(), err); exit(1); }); - let exit_status = child.wait().expect("failed to wait on child"); - if !exit_status.success() { + let output = child.wait_with_output().expect("failed to wait on child"); + if !output.status.success() { exit(1); } + output + .stdout + .as_slice() + .iter() + .map(|&c| c as char) + .collect::() +} + +// Check whether a package is installed and install it if missing. +fn install_if_missing( + config: &Config, + package: &str, + version: &str, + url: &str, + file: &Path, +) -> Result<(), String> { + // Check whether the package is already in ~/.cache/solana. + // Donwload it and place in the proper location if not found. + let home_dir = PathBuf::from(env::var("HOME").unwrap_or_else(|err| { + eprintln!("Can't get home directory path: {}", err); + exit(1); + })); + let target_path = home_dir + .join(".cache") + .join("solana") + .join(version) + .join(package); + if !target_path.is_dir() { + if target_path.exists() { + fs::remove_file(&target_path).map_err(|err| err.to_string())?; + } + let mut url = String::from(url); + url.push('/'); + url.push_str(version); + url.push('/'); + url.push_str(file.to_str().unwrap()); + download_file(&url.as_str(), &file, true)?; + fs::create_dir_all(&target_path).map_err(|err| err.to_string())?; + let zip = File::open(&file).map_err(|err| err.to_string())?; + let tar = BzDecoder::new(BufReader::new(zip)); + let mut archive = Archive::new(tar); + archive + .unpack(&target_path) + .map_err(|err| err.to_string())?; + fs::remove_file(file).map_err(|err| err.to_string())?; + } + // Make a symbolyc link source_path -> target_path in the + // sdk/bpf/dependencies directory if no valid link found. 
+ let source_base = config.bpf_sdk.join("dependencies"); + if !source_base.exists() { + fs::create_dir_all(&source_base).map_err(|err| err.to_string())?; + } + let source_path = source_base.join(package); + // Check whether the correct symbolic link exists. + let missing_source = if source_path.exists() { + let invalid_link = if let Ok(link_target) = source_path.read_link() { + link_target != target_path + } else { + true + }; + if invalid_link { + fs::remove_file(&source_path).map_err(|err| err.to_string())?; + } + invalid_link + } else { + true + }; + if missing_source { + #[cfg(unix)] + std::os::unix::fs::symlink(target_path, source_path).map_err(|err| err.to_string())?; + #[cfg(windows)] + std::os::windows::fs::symlink_dir(target_path, source_path) + .map_err(|err| err.to_string())?; + } + Ok(()) +} + +// check whether custom BPF toolchain is linked, and link it if it is not. +fn link_bpf_toolchain(config: &Config) { + let toolchain_path = config + .bpf_sdk + .join("dependencies") + .join("bpf-tools") + .join("rust"); + let rustup = PathBuf::from("rustup"); + let rustup_args = vec!["toolchain", "list", "-v"]; + let rustup_output = spawn(&rustup, &rustup_args); + let mut do_link = true; + for line in rustup_output.lines() { + if line.starts_with("bpf") { + let mut it = line.split_whitespace(); + let _ = it.next(); + let path = it.next(); + if path.unwrap() != toolchain_path.to_str().unwrap() { + let rustup_args = vec!["toolchain", "uninstall", "bpf"]; + spawn(&rustup, &rustup_args); + } else { + do_link = false; + } + break; + } + } + if do_link { + let rustup_args = vec!["toolchain", "link", "bpf", toolchain_path.to_str().unwrap()]; + spawn(&rustup, &rustup_args); + } } fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_metadata::Package) { @@ -141,27 +252,71 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m if legacy_program_feature_present { println!("Legacy program feature detected"); } + let 
bpf_tools_filename = if cfg!(target_os = "macos") { + "solana-bpf-tools-osx.tar.bz2" + } else { + "solana-bpf-tools-linux.tar.bz2" + }; + install_if_missing( + &config, + "bpf-tools", + "v1.5", + "https://github.com/solana-labs/bpf-tools/releases/download", + &PathBuf::from(bpf_tools_filename), + ) + .expect("Failed to install bpf-tools"); + link_bpf_toolchain(&config); - let xargo_build = config.bpf_sdk.join("rust").join("xargo-build.sh"); - let mut xargo_build_args = vec![]; + let llvm_bin = config + .bpf_sdk + .join("dependencies") + .join("bpf-tools") + .join("llvm") + .join("bin"); + env::set_var("CC", llvm_bin.join("clang")); + env::set_var("AR", llvm_bin.join("llvm-ar")); + env::set_var("OBJDUMP", llvm_bin.join("llvm-objdump")); + env::set_var("OBJCOPY", llvm_bin.join("llvm-objcopy")); + let linker = llvm_bin.join("ld.lld"); + let linker_script = config.bpf_sdk.join("rust").join("bpf.ld"); + let mut rust_flags = String::from("-C lto=no"); + rust_flags.push_str(" -C opt-level=2"); + rust_flags.push_str(" -C link-arg=-z -C link-arg=notext"); + rust_flags.push_str(" -C link-arg=-T"); + rust_flags.push_str(linker_script.to_str().unwrap()); + rust_flags.push_str(" -C link-arg=--Bdynamic"); + rust_flags.push_str(" -C link-arg=-shared"); + rust_flags.push_str(" -C link-arg=--threads=1"); + rust_flags.push_str(" -C link-arg=--entry=entrypoint"); + rust_flags.push_str(" -C linker="); + rust_flags.push_str(linker.to_str().unwrap()); + env::set_var("RUSTFLAGS", rust_flags); + let cargo_build = PathBuf::from("cargo"); + let mut cargo_build_args = vec![ + "+bpf", + "build", + "--target", + "bpfel-unknown-unknown", + "--release", + ]; if config.no_default_features { - xargo_build_args.push("--no-default-features"); + cargo_build_args.push("--no-default-features"); } for feature in &config.features { - xargo_build_args.push("--features"); - xargo_build_args.push(feature); + cargo_build_args.push("--features"); + cargo_build_args.push(feature); } if 
legacy_program_feature_present { if !config.no_default_features { - xargo_build_args.push("--no-default-features"); + cargo_build_args.push("--no-default-features"); } - xargo_build_args.push("--features=program"); + cargo_build_args.push("--features=program"); } if config.verbose { - xargo_build_args.push("--verbose"); + cargo_build_args.push("--verbose"); } - spawn(&config.bpf_sdk.join(xargo_build), &xargo_build_args); + spawn(&cargo_build, &cargo_build_args); if let Some(program_name) = program_name { let program_unstripped_so = target_build_directory.join(&format!("{}.so", program_name)); @@ -246,8 +401,14 @@ fn build_bpf(config: Config, manifest_path: Option) { .packages .iter() .filter(|package| { - package.manifest_path.with_file_name("Xargo.toml").exists() - && metadata.workspace_members.contains(&package.id) + if metadata.workspace_members.contains(&package.id) { + for target in package.targets.iter() { + if target.kind.contains(&"cdylib".to_string()) { + return true; + } + } + } + false }) .collect::>(); @@ -257,6 +418,10 @@ fn build_bpf(config: Config, manifest_path: Option) { } fn main() { + if cfg!(windows) { + println!("Solana Rust BPF toolchain is not available on Windows"); + exit(1); + } let default_config = Config::default(); let default_bpf_sdk = format!("{}", default_config.bpf_sdk.display()); diff --git a/sdk/cargo-test-bpf/Cargo.toml b/sdk/cargo-test-bpf/Cargo.toml index 37adb8e52e..b979f12243 100644 --- a/sdk/cargo-test-bpf/Cargo.toml +++ b/sdk/cargo-test-bpf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-cargo-test-bpf" -version = "1.5.19" +version = "1.6.14" description = "Execute all unit and integration tests after building with the Solana BPF SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index dbd29785ed..fa0bb3c166 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -18,6 +18,7 
@@ struct Config { features: Vec, test_name: Option, no_default_features: bool, + no_run: bool, offline: bool, verbose: bool, workspace: bool, @@ -34,6 +35,7 @@ impl Default for Config { features: vec![], test_name: None, no_default_features: false, + no_run: false, offline: false, verbose: false, workspace: false, @@ -109,6 +111,10 @@ fn test_bpf_package(config: &Config, target_directory: &Path, package: &cargo_me cargo_args.push(test_name); } + if config.no_run { + cargo_args.push("--no-run"); + } + // If the program crate declares the "test-bpf" feature, pass it along to the tests so they can // distinguish between `cargo test` and `cargo test-bpf` if set_test_bpf_feature { @@ -146,8 +152,14 @@ fn test_bpf(config: Config, manifest_path: Option) { .packages .iter() .filter(|package| { - package.manifest_path.with_file_name("Xargo.toml").exists() - && metadata.workspace_members.contains(&package.id) + if metadata.workspace_members.contains(&package.id) { + for target in package.targets.iter() { + if target.kind.contains(&"cdylib".to_string()) { + return true; + } + } + } + false }) .collect::>(); @@ -215,6 +227,12 @@ fn main() { .takes_value(true) .help("Place final BPF build artifacts in this directory"), ) + .arg( + Arg::with_name("no_run") + .long("no-run") + .takes_value(false) + .help("Compile, but don't run tests"), + ) .arg( Arg::with_name("offline") .long("offline") @@ -255,6 +273,7 @@ fn main() { .unwrap_or_else(Vec::new), test_name: value_t!(matches, "test", String).ok(), no_default_features: matches.is_present("no_default_features"), + no_run: matches.is_present("no_run"), offline: matches.is_present("offline"), verbose: matches.is_present("verbose"), workspace: matches.is_present("workspace"), diff --git a/sdk/docker-solana/Dockerfile b/sdk/docker-solana/Dockerfile index 874a328aa4..1beecc8ae8 100644 --- a/sdk/docker-solana/Dockerfile +++ b/sdk/docker-solana/Dockerfile @@ -1,7 +1,35 @@ FROM debian:buster -# JSON RPC port +# RPC JSON EXPOSE 8899/tcp +# 
RPC pubsub +EXPOSE 8900/tcp +# entrypoint +EXPOSE 8001/tcp +# (future) bank service +EXPOSE 8901/tcp +# bank service +EXPOSE 8902/tcp +# faucet +EXPOSE 9900/tcp +# tvu +EXPOSE 8000/udp +# gossip +EXPOSE 8001/udp +# tvu_forwards +EXPOSE 8002/udp +# tpu +EXPOSE 8003/udp +# tpu_forwards +EXPOSE 8004/udp +# retransmit +EXPOSE 8005/udp +# repair +EXPOSE 8006/udp +# serve_repair +EXPOSE 8007/udp +# broadcast +EXPOSE 8008/udp RUN apt update && \ apt-get install -y bzip2 libssl-dev && \ diff --git a/sdk/macro/Cargo.toml b/sdk/macro/Cargo.toml index e38e430bcf..683fc2b9b2 100644 --- a/sdk/macro/Cargo.toml +++ b/sdk/macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-sdk-macro" -version = "1.5.19" +version = "1.6.14" description = "Solana SDK Macro" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 6fcb50aec2..73e7fe9c9c 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-program" -version = "1.5.19" +version = "1.6.14" description = "Solana Program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -26,16 +26,17 @@ serde = "1.0.112" serde_bytes = "0.11" serde_derive = "1.0.103" sha2 = "0.9.2" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.5.19" } -solana-sdk-macro = { path = "../macro", version = "=1.5.19" } +sha3 = "0.9.1" +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.6.14" } +solana-sdk-macro = { path = "../macro", version = "=1.6.14" } thiserror = "1.0" [target.'cfg(not(target_arch = "bpf"))'.dependencies] blake3 = "0.3.7" curve25519-dalek = "2.1.0" rand = "0.7.0" -solana-logger = { path = "../../logger", version = "=1.5.19" } +solana-logger = { path = "../../logger", 
version = "=1.6.14" } [dev-dependencies] assert_matches = "1.3.0" diff --git a/sdk/program/src/borsh.rs b/sdk/program/src/borsh.rs index a2df53bfdd..a876510f76 100644 --- a/sdk/program/src/borsh.rs +++ b/sdk/program/src/borsh.rs @@ -1,6 +1,12 @@ //! Borsh utils -use borsh::schema::{BorshSchema, Declaration, Definition, Fields}; -use std::collections::HashMap; +use { + borsh::{ + maybestd::io::{Error, Write}, + schema::{BorshSchema, Declaration, Definition, Fields}, + BorshDeserialize, BorshSerialize, + }, + std::collections::HashMap, +}; /// Get packed length for the given BorchSchema Declaration fn get_declaration_packed_len( @@ -39,7 +45,7 @@ fn get_declaration_packed_len( None => match declaration { "u8" | "i8" => 1, "u16" | "i16" => 2, - "u32" | "i32" => 2, + "u32" | "i32" => 4, "u64" | "i64" => 8, "u128" | "i128" => 16, "nil" => 0, @@ -49,7 +55,244 @@ fn get_declaration_packed_len( } /// Get the worst-case packed length for the given BorshSchema +/// +/// Note: due to the serializer currently used by Borsh, this function cannot +/// be used on-chain in the Solana BPF execution environment. pub fn get_packed_len() -> usize { let schema_container = S::schema_container(); get_declaration_packed_len(&schema_container.declaration, &schema_container.definitions) } + +/// Deserializes without checking that the entire slice has been consumed +/// +/// Normally, `try_from_slice` checks the length of the final slice to ensure +/// that the deserialization uses up all of the bytes in the slice. +/// +/// Note that there is a potential issue with this function. Any buffer greater than +/// or equal to the expected size will properly deserialize. For example, if the +/// user passes a buffer destined for a different type, the error won't get caught +/// as easily. 
+pub fn try_from_slice_unchecked(data: &[u8]) -> Result { + let mut data_mut = data; + let result = T::deserialize(&mut data_mut)?; + Ok(result) +} + +/// Helper struct which to count how much data would be written during serialization +#[derive(Default)] +struct WriteCounter { + count: usize, +} + +impl Write for WriteCounter { + fn write(&mut self, data: &[u8]) -> Result { + let amount = data.len(); + self.count += amount; + Ok(amount) + } + + fn flush(&mut self) -> Result<(), Error> { + Ok(()) + } +} + +/// Get the packed length for the serialized form of this object instance. +/// +/// Useful when working with instances of types that contain a variable-length +/// sequence, such as a Vec or HashMap. Since it is impossible to know the packed +/// length only from the type's schema, this can be used when an instance already +/// exists, to figure out how much space to allocate in an account. +pub fn get_instance_packed_len(instance: &T) -> Result { + let mut counter = WriteCounter::default(); + instance.serialize(&mut counter)?; + Ok(counter.count) +} + +#[cfg(test)] +mod tests { + use { + super::*, + borsh::{maybestd::io::ErrorKind, BorshSchema, BorshSerialize}, + std::{collections::HashMap, mem::size_of}, + }; + + #[derive(PartialEq, Clone, Debug, BorshSerialize, BorshDeserialize, BorshSchema)] + enum TestEnum { + NoValue, + Value(u32), + StructValue { + #[allow(dead_code)] + number: u64, + #[allow(dead_code)] + array: [u8; 8], + }, + } + + // for test simplicity + impl Default for TestEnum { + fn default() -> Self { + Self::NoValue + } + } + + #[derive(Default, BorshSerialize, BorshDeserialize, BorshSchema)] + struct TestStruct { + pub array: [u64; 16], + pub number_u128: u128, + pub number_u32: u32, + pub tuple: (u8, u16), + pub enumeration: TestEnum, + } + + #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)] + struct Child { + pub data: [u8; 64], + } + + #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)] + 
struct Parent { + pub data: Vec, + } + + #[test] + fn unchecked_deserialization() { + let data = vec![ + Child { data: [0u8; 64] }, + Child { data: [1u8; 64] }, + Child { data: [2u8; 64] }, + ]; + let parent = Parent { data }; + + // exact size, both work + let mut byte_vec = vec![0u8; 4 + get_packed_len::() * 3]; + let mut bytes = byte_vec.as_mut_slice(); + parent.serialize(&mut bytes).unwrap(); + let deserialized = Parent::try_from_slice(&byte_vec).unwrap(); + assert_eq!(deserialized, parent); + let deserialized = try_from_slice_unchecked::(&byte_vec).unwrap(); + assert_eq!(deserialized, parent); + + // too big, only unchecked works + let mut byte_vec = vec![0u8; 4 + get_packed_len::() * 10]; + let mut bytes = byte_vec.as_mut_slice(); + parent.serialize(&mut bytes).unwrap(); + let err = Parent::try_from_slice(&byte_vec).unwrap_err(); + assert_eq!(err.kind(), ErrorKind::InvalidData); + let deserialized = try_from_slice_unchecked::(&byte_vec).unwrap(); + assert_eq!(deserialized, parent); + } + + #[test] + fn packed_len() { + assert_eq!( + get_packed_len::(), + size_of::() + size_of::() + size_of::() * 8 + ); + assert_eq!( + get_packed_len::(), + size_of::() * 16 + + size_of::() + + size_of::() + + size_of::() + + size_of::() + + get_packed_len::() + ); + } + + #[test] + fn instance_packed_len_matches_packed_len() { + let enumeration = TestEnum::StructValue { + number: u64::MAX, + array: [255; 8], + }; + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&enumeration).unwrap(), + ); + let test_struct = TestStruct { + enumeration, + ..TestStruct::default() + }; + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&test_struct).unwrap(), + ); + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&0u8).unwrap(), + ); + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&0u16).unwrap(), + ); + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&0u32).unwrap(), + ); + assert_eq!( + get_packed_len::(), + 
get_instance_packed_len(&0u64).unwrap(), + ); + assert_eq!( + get_packed_len::(), + get_instance_packed_len(&0u128).unwrap(), + ); + assert_eq!( + get_packed_len::<[u8; 10]>(), + get_instance_packed_len(&[0u8; 10]).unwrap(), + ); + assert_eq!( + get_packed_len::<(i8, i16, i32, i64, i128)>(), + get_instance_packed_len(&(i8::MAX, i16::MAX, i32::MAX, i64::MAX, i128::MAX)).unwrap(), + ); + } + + #[test] + fn instance_packed_len_with_vec() { + let data = vec![ + Child { data: [0u8; 64] }, + Child { data: [1u8; 64] }, + Child { data: [2u8; 64] }, + Child { data: [3u8; 64] }, + Child { data: [4u8; 64] }, + Child { data: [5u8; 64] }, + ]; + let parent = Parent { data }; + assert_eq!( + get_instance_packed_len(&parent).unwrap(), + 4 + parent.data.len() * get_packed_len::() + ); + } + + #[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, BorshSchema)] + struct StructWithHashMap { + data: HashMap, + } + + #[test] + fn instance_packed_len_with_varying_sizes_in_hashmap() { + let mut data = HashMap::new(); + let string1 = "the first string, it's actually really really long".to_string(); + let enum1 = TestEnum::NoValue; + let string2 = "second string, shorter".to_string(); + let enum2 = TestEnum::Value(u32::MAX); + let string3 = "third".to_string(); + let enum3 = TestEnum::StructValue { + number: 0, + array: [0; 8], + }; + data.insert(string1.clone(), enum1.clone()); + data.insert(string2.clone(), enum2.clone()); + data.insert(string3.clone(), enum3.clone()); + let instance = StructWithHashMap { data }; + assert_eq!( + get_instance_packed_len(&instance).unwrap(), + 4 + get_instance_packed_len(&string1).unwrap() + + get_instance_packed_len(&enum1).unwrap() + + get_instance_packed_len(&string2).unwrap() + + get_instance_packed_len(&enum2).unwrap() + + get_instance_packed_len(&string3).unwrap() + + get_instance_packed_len(&enum3).unwrap() + ); + } +} diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs index 
78f9d990d6..2ff747ba76 100644 --- a/sdk/program/src/bpf_loader_upgradeable.rs +++ b/sdk/program/src/bpf_loader_upgradeable.rs @@ -99,7 +99,7 @@ pub fn create_buffer( UpgradeableLoaderState::buffer_len(program_len)? as u64, &id(), ), - Instruction::new( + Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::InitializeBuffer, vec![ @@ -118,7 +118,7 @@ pub fn write( offset: u32, bytes: Vec, ) -> Instruction { - Instruction::new( + Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::Write { offset, bytes }, vec![ @@ -148,7 +148,7 @@ pub fn deploy_with_max_program_len( UpgradeableLoaderState::program_len()? as u64, &id(), ), - Instruction::new( + Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len }, vec![ @@ -173,7 +173,7 @@ pub fn upgrade( spill_address: &Pubkey, ) -> Instruction { let (programdata_address, _) = Pubkey::find_program_address(&[program_address.as_ref()], &id()); - Instruction::new( + Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::Upgrade, vec![ @@ -189,7 +189,11 @@ pub fn upgrade( } pub fn is_upgrade_instruction(instruction_data: &[u8]) -> bool { - 3 == instruction_data[0] + !instruction_data.is_empty() && 3 == instruction_data[0] +} + +pub fn is_set_authority_instruction(instruction_data: &[u8]) -> bool { + !instruction_data.is_empty() && 4 == instruction_data[0] } /// Returns the instructions required to set a buffers's authority. 
@@ -198,7 +202,7 @@ pub fn set_buffer_authority( current_authority_address: &Pubkey, new_authority_address: &Pubkey, ) -> Instruction { - Instruction::new( + Instruction::new_with_bincode( id(), &UpgradeableLoaderInstruction::SetAuthority, vec![ @@ -224,7 +228,7 @@ pub fn set_upgrade_authority( if let Some(address) = new_authority_address { metas.push(AccountMeta::new_readonly(*address, false)); } - Instruction::new(id(), &UpgradeableLoaderInstruction::SetAuthority, metas) + Instruction::new_with_bincode(id(), &UpgradeableLoaderInstruction::SetAuthority, metas) } /// Returns the instructions required to close an account @@ -262,44 +266,84 @@ mod tests { ); } - #[test] - fn test_is_upgrade_instruction() { - assert_eq!( - false, - is_upgrade_instruction( - &bincode::serialize(&UpgradeableLoaderInstruction::InitializeBuffer).unwrap() - ) + fn assert_is_instruction( + is_instruction_fn: F, + expected_instruction: UpgradeableLoaderInstruction, + ) where + F: Fn(&[u8]) -> bool, + { + let result = is_instruction_fn( + &bincode::serialize(&UpgradeableLoaderInstruction::InitializeBuffer).unwrap(), ); - assert_eq!( - false, - is_upgrade_instruction( - &bincode::serialize(&UpgradeableLoaderInstruction::Write { - offset: 0, - bytes: vec![], - }) - .unwrap() - ) + let expected_result = matches!( + expected_instruction, + UpgradeableLoaderInstruction::InitializeBuffer ); - assert_eq!( - false, - is_upgrade_instruction( - &bincode::serialize(&UpgradeableLoaderInstruction::DeployWithMaxDataLen { - max_data_len: 0, - }) - .unwrap() - ) + assert_eq!(expected_result, result); + + let result = is_instruction_fn( + &bincode::serialize(&UpgradeableLoaderInstruction::Write { + offset: 0, + bytes: vec![], + }) + .unwrap(), ); - assert_eq!( - true, - is_upgrade_instruction( - &bincode::serialize(&UpgradeableLoaderInstruction::Upgrade).unwrap() - ) + let expected_result = matches!( + expected_instruction, + UpgradeableLoaderInstruction::Write { + offset: _, + bytes: _, + } ); - assert_eq!( 
- false, - is_upgrade_instruction( - &bincode::serialize(&UpgradeableLoaderInstruction::SetAuthority).unwrap() - ) + assert_eq!(expected_result, result); + + let result = is_instruction_fn( + &bincode::serialize(&UpgradeableLoaderInstruction::DeployWithMaxDataLen { + max_data_len: 0, + }) + .unwrap(), + ); + let expected_result = matches!( + expected_instruction, + UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len: _ } + ); + assert_eq!(expected_result, result); + + let result = + is_instruction_fn(&bincode::serialize(&UpgradeableLoaderInstruction::Upgrade).unwrap()); + let expected_result = matches!(expected_instruction, UpgradeableLoaderInstruction::Upgrade); + assert_eq!(expected_result, result); + + let result = is_instruction_fn( + &bincode::serialize(&UpgradeableLoaderInstruction::SetAuthority).unwrap(), + ); + let expected_result = matches!( + expected_instruction, + UpgradeableLoaderInstruction::SetAuthority + ); + assert_eq!(expected_result, result); + + let result = + is_instruction_fn(&bincode::serialize(&UpgradeableLoaderInstruction::Close).unwrap()); + let expected_result = matches!(expected_instruction, UpgradeableLoaderInstruction::Close); + assert_eq!(expected_result, result); + } + + #[test] + fn test_is_set_authority_instruction() { + assert!(!is_set_authority_instruction(&[])); + assert_is_instruction( + is_set_authority_instruction, + UpgradeableLoaderInstruction::SetAuthority {}, + ); + } + + #[test] + fn test_is_upgrade_instruction() { + assert!(!is_upgrade_instruction(&[])); + assert_is_instruction( + is_upgrade_instruction, + UpgradeableLoaderInstruction::Upgrade {}, ); } } diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index 76a327b9da..f923fe5503 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -29,6 +29,7 @@ pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 2 * TICKS_PER_DAY / DEFAULT_TICKS_PER_S // leader schedule is governed by this pub const NUM_CONSECUTIVE_LEADER_SLOTS: u64 = 4; +pub 
const DEFAULT_S_PER_SLOT: f64 = DEFAULT_TICKS_PER_SLOT as f64 / DEFAULT_TICKS_PER_SECOND as f64; pub const DEFAULT_MS_PER_SLOT: u64 = 1_000 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND; /// The time window of recent block hash values that the bank will track the signatures @@ -39,9 +40,6 @@ pub const DEFAULT_MS_PER_SLOT: u64 = 1_000 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TI /// not be processed by the network. pub const MAX_HASH_AGE_IN_SECONDS: usize = 120; -// Number of max evm blockhashes to save; -pub const MAX_EVM_BLOCKHASHES: usize = 256; - // Number of maximum recent blockhashes (one blockhash per slot) pub const MAX_RECENT_BLOCKHASHES: usize = MAX_HASH_AGE_IN_SECONDS * DEFAULT_TICKS_PER_SECOND as usize / DEFAULT_TICKS_PER_SLOT as usize; @@ -84,7 +82,7 @@ pub type UnixTimestamp = i64; /// as the network progresses). /// #[repr(C)] -#[derive(Serialize, Deserialize, Debug, Default, PartialEq)] +#[derive(Serialize, Clone, Deserialize, Debug, Default, PartialEq)] pub struct Clock { /// the current network/bank Slot pub slot: Slot, diff --git a/sdk/program/src/entrypoint.rs b/sdk/program/src/entrypoint.rs index bebef33377..9cbf954466 100644 --- a/sdk/program/src/entrypoint.rs +++ b/sdk/program/src/entrypoint.rs @@ -190,7 +190,6 @@ pub unsafe fn deserialize<'a>(input: *mut u8) -> (&'a Pubkey, Vec(program_id: Pubkey, data: &T, accounts: Vec) -> Self { - let data = serialize(data).unwrap(); - Self { - program_id, - accounts, - data, - } + Self::new_with_bincode(program_id, data, accounts) } pub fn new_with_bincode( @@ -225,7 +244,12 @@ impl Instruction { data: &T, accounts: Vec, ) -> Self { - Self::new(program_id, data, accounts) + let data = serialize(data).unwrap(); + Self { + program_id, + accounts, + data, + } } pub fn new_with_borsh( @@ -240,6 +264,14 @@ impl Instruction { data, } } + + pub fn new_with_bytes(program_id: Pubkey, data: &[u8], accounts: Vec) -> Self { + Self { + program_id, + accounts, + data: data.to_vec(), + } + } } pub fn checked_add(a: u64, 
b: u64) -> Result { diff --git a/sdk/program/src/keccak.rs b/sdk/program/src/keccak.rs new file mode 100644 index 0000000000..ab0572b2fa --- /dev/null +++ b/sdk/program/src/keccak.rs @@ -0,0 +1,159 @@ +use crate::sanitize::Sanitize; +use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use sha3::{Digest, Keccak256}; +use std::{convert::TryFrom, fmt, mem, str::FromStr}; +use thiserror::Error; + +pub const HASH_BYTES: usize = 32; +/// Maximum string length of a base58 encoded hash +const MAX_BASE58_LEN: usize = 44; +#[derive( + Serialize, + Deserialize, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Clone, + Copy, + Default, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + AbiExample, +)] +#[repr(transparent)] +pub struct Hash(pub [u8; HASH_BYTES]); + +#[derive(Clone, Default)] +pub struct Hasher { + hasher: Keccak256, +} + +impl Hasher { + pub fn hash(&mut self, val: &[u8]) { + self.hasher.update(val); + } + pub fn hashv(&mut self, vals: &[&[u8]]) { + for val in vals { + self.hash(val); + } + } + pub fn result(self) -> Hash { + // At the time of this writing, the sha3 library is stuck on an old version + // of generic_array (0.9.0). Decouple ourselves with a clone to our version. + Hash(<[u8; HASH_BYTES]>::try_from(self.hasher.finalize().as_slice()).unwrap()) + } +} + +impl Sanitize for Hash {} + +impl AsRef<[u8]> for Hash { + fn as_ref(&self) -> &[u8] { + &self.0[..] 
+ } +} + +impl fmt::Debug for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", bs58::encode(self.0).into_string()) + } +} + +impl fmt::Display for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", bs58::encode(self.0).into_string()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Error)] +pub enum ParseHashError { + #[error("string decoded to wrong size for hash")] + WrongSize, + #[error("failed to decoded string to hash")] + Invalid, +} + +impl FromStr for Hash { + type Err = ParseHashError; + + fn from_str(s: &str) -> Result { + if s.len() > MAX_BASE58_LEN { + return Err(ParseHashError::WrongSize); + } + let bytes = bs58::decode(s) + .into_vec() + .map_err(|_| ParseHashError::Invalid)?; + if bytes.len() != mem::size_of::() { + Err(ParseHashError::WrongSize) + } else { + Ok(Hash::new(&bytes)) + } + } +} + +impl Hash { + pub fn new(hash_slice: &[u8]) -> Self { + Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap()) + } + + pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self { + Self(hash_array) + } + + /// unique Hash for tests and benchmarks. + pub fn new_unique() -> Self { + use std::sync::atomic::{AtomicU64, Ordering}; + static I: AtomicU64 = AtomicU64::new(1); + + let mut b = [0u8; HASH_BYTES]; + let i = I.fetch_add(1, Ordering::Relaxed); + b[0..8].copy_from_slice(&i.to_le_bytes()); + Self::new(&b) + } + + pub fn to_bytes(self) -> [u8; HASH_BYTES] { + self.0 + } +} + +/// Return a Keccak256 hash for the given data. 
+pub fn hashv(vals: &[&[u8]]) -> Hash { + // Perform the calculation inline, calling this from within a program is + // not supported + #[cfg(not(target_arch = "bpf"))] + { + let mut hasher = Hasher::default(); + hasher.hashv(vals); + hasher.result() + } + // Call via a system call to perform the calculation + #[cfg(target_arch = "bpf")] + { + extern "C" { + fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64; + }; + let mut hash_result = [0; HASH_BYTES]; + unsafe { + sol_keccak256( + vals as *const _ as *const u8, + vals.len() as u64, + &mut hash_result as *mut _ as *mut u8, + ); + } + Hash::new_from_array(hash_result) + } +} + +/// Return a Keccak256 hash for the given data. +pub fn hash(val: &[u8]) -> Hash { + hashv(&[val]) +} + +/// Return the hash of the given hash extended with the given value. +pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash { + let mut hash_data = id.as_ref().to_vec(); + hash_data.extend_from_slice(val); + hash(&hash_data) +} diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 66bd4256a0..8253aa9869 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -22,6 +22,7 @@ pub mod fee_calculator; pub mod hash; pub mod incinerator; pub mod instruction; +pub mod keccak; pub mod loader_instruction; pub mod loader_upgradeable_instruction; pub mod log; @@ -46,6 +47,27 @@ pub mod system_instruction; pub mod system_program; pub mod sysvar; +pub mod config { + pub mod program { + crate::declare_id!("Config1111111111111111111111111111111111111"); + } +} + +pub mod vote { + pub mod program { + crate::declare_id!("Vote111111111111111111111111111111111111111"); + } +} + +pub mod stake { + pub mod config { + crate::declare_id!("StakeConfig11111111111111111111111111111111"); + } + + pub mod program { + crate::declare_id!("Stake11111111111111111111111111111111111111"); + } +} /// Convenience macro to declare a static public key and functions to interact with it /// /// Input: a single literal base58 
string representation of a program's id diff --git a/sdk/program/src/loader_instruction.rs b/sdk/program/src/loader_instruction.rs index 9465f14744..a2a195c05c 100644 --- a/sdk/program/src/loader_instruction.rs +++ b/sdk/program/src/loader_instruction.rs @@ -37,7 +37,7 @@ pub fn write( bytes: Vec, ) -> Instruction { let account_metas = vec![AccountMeta::new(*account_pubkey, true)]; - Instruction::new( + Instruction::new_with_bincode( *program_id, &LoaderInstruction::Write { offset, bytes }, account_metas, @@ -49,18 +49,5 @@ pub fn finalize(account_pubkey: &Pubkey, program_id: &Pubkey) -> Instruction { AccountMeta::new(*account_pubkey, true), AccountMeta::new_readonly(rent::id(), false), ]; - Instruction::new(*program_id, &LoaderInstruction::Finalize, account_metas) -} - -pub fn finalize_with_caller( - account_pubkey: &Pubkey, - program_id: &Pubkey, - caller_key: &Pubkey, -) -> Instruction { - let account_metas = vec![ - AccountMeta::new(*account_pubkey, true), - AccountMeta::new(rent::id(), false), - AccountMeta::new(*caller_key, true), - ]; - Instruction::new(*program_id, &LoaderInstruction::Finalize, account_metas) + Instruction::new_with_bincode(*program_id, &LoaderInstruction::Finalize, account_metas) } diff --git a/sdk/program/src/log.rs b/sdk/program/src/log.rs index cc474eb942..c0a8211ab9 100644 --- a/sdk/program/src/log.rs +++ b/sdk/program/src/log.rs @@ -3,7 +3,7 @@ use crate::account_info::AccountInfo; #[macro_export] -#[deprecated(since = "1.4.14", note = "use `msg` macro instead")] +#[deprecated(since = "1.4.14", note = "Please use `msg` macro instead")] macro_rules! 
info { ($msg:expr) => { $crate::log::sol_log($msg) diff --git a/sdk/program/src/message.rs b/sdk/program/src/message.rs index 2765c68654..32c0198104 100644 --- a/sdk/program/src/message.rs +++ b/sdk/program/src/message.rs @@ -5,13 +5,34 @@ use crate::serialize_utils::{ append_slice, append_u16, append_u8, read_pubkey, read_slice, read_u16, read_u8, }; use crate::{ + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, hash::Hash, instruction::{AccountMeta, CompiledInstruction, Instruction}, pubkey::Pubkey, - short_vec, system_instruction, + short_vec, system_instruction, system_program, sysvar, }; use itertools::Itertools; -use std::convert::TryFrom; +use lazy_static::lazy_static; +use std::{convert::TryFrom, str::FromStr}; + +lazy_static! { + // Copied keys over since direct references create cyclical dependency. + static ref BUILTIN_PROGRAMS_KEYS: [Pubkey; 10] = { + let parse = |s| Pubkey::from_str(s).unwrap(); + [ + parse("Config1111111111111111111111111111111111111"), + parse("Feature111111111111111111111111111111111111"), + parse("NativeLoader1111111111111111111111111111111"), + parse("Stake11111111111111111111111111111111111111"), + parse("StakeConfig11111111111111111111111111111111"), + parse("Vote111111111111111111111111111111111111111"), + system_program::id(), + bpf_loader::id(), + bpf_loader_deprecated::id(), + bpf_loader_upgradeable::id(), + ] + }; +} fn position(keys: &[Pubkey], key: &Pubkey) -> u8 { keys.iter().position(|k| k == key).unwrap() as u8 @@ -318,10 +339,11 @@ impl Message { /// Return true if message borrow mutably evm_state account. 
pub fn is_modify_evm_state(&self) -> bool { - self.account_keys - .iter() - .enumerate() - .any(|(num, key)| *key == crate::evm_state::id() && self.is_writable(num)) + self.account_keys.iter().enumerate().any(|(num, key)| { + *key == crate::evm_state::id() + // NOTE: 'true' as 'demote_sysvar_write_locks' makes no shortcuts in flow + && self.is_writable(num, true) + }) } pub fn is_key_passed_to_program(&self, index: usize) -> bool { @@ -346,23 +368,35 @@ impl Message { .position(|&&pubkey| pubkey == self.account_keys[index]) } - pub fn is_writable(&self, i: usize) -> bool { - i < (self.header.num_required_signatures - self.header.num_readonly_signed_accounts) + pub fn maybe_executable(&self, i: usize) -> bool { + self.program_position(i).is_some() + } + + pub fn is_writable(&self, i: usize, demote_sysvar_write_locks: bool) -> bool { + (i < (self.header.num_required_signatures - self.header.num_readonly_signed_accounts) as usize || (i >= self.header.num_required_signatures as usize && i < self.account_keys.len() - - self.header.num_readonly_unsigned_accounts as usize) + - self.header.num_readonly_unsigned_accounts as usize)) + && !{ + let key = self.account_keys[i]; + demote_sysvar_write_locks + && (sysvar::is_sysvar_id(&key) || BUILTIN_PROGRAMS_KEYS.contains(&key)) + } } pub fn is_signer(&self, i: usize) -> bool { i < self.header.num_required_signatures as usize } - pub fn get_account_keys_by_lock_type(&self) -> (Vec<&Pubkey>, Vec<&Pubkey>) { + pub fn get_account_keys_by_lock_type( + &self, + demote_sysvar_write_locks: bool, + ) -> (Vec<&Pubkey>, Vec<&Pubkey>) { let mut writable_keys = vec![]; let mut readonly_keys = vec![]; for (i, key) in self.account_keys.iter().enumerate() { - if self.is_writable(i) { + if self.is_writable(i, demote_sysvar_write_locks) { writable_keys.push(key); } else { readonly_keys.push(key); @@ -375,16 +409,16 @@ impl Message { // [0..2 - num_instructions // // Then a table of offsets of where to find them in the data - // 
3..2*num_instructions table of instruction offsets + // 3..2 * num_instructions table of instruction offsets // // Each instruction is then encoded as: // 0..2 - num_accounts - // 3 - meta_byte -> (bit 0 signer, bit 1 is_writable) - // 4..36 - pubkey - 32 bytes - // 36..64 - program_id - // 33..34 - data len - u16 - // 35..data_len - data - pub fn serialize_instructions(&self) -> Vec { + // 2 - meta_byte -> (bit 0 signer, bit 1 is_writable) + // 3..35 - pubkey - 32 bytes + // 35..67 - program_id + // 67..69 - data len - u16 + // 69..data_len - data + pub fn serialize_instructions(&self, demote_sysvar_write_locks: bool) -> Vec { // 64 bytes is a reasonable guess, calculating exactly is slower in benchmarks let mut data = Vec::with_capacity(self.instructions.len() * (32 * 2)); append_u16(&mut data, self.instructions.len() as u16); @@ -399,7 +433,7 @@ impl Message { for account_index in &instruction.accounts { let account_index = *account_index as usize; let is_signer = self.is_signer(account_index); - let is_writable = self.is_writable(account_index); + let is_writable = self.is_writable(account_index, demote_sysvar_write_locks); let mut meta_byte = 0; if is_signer { meta_byte |= 1 << Self::IS_SIGNER_BIT; @@ -479,27 +513,48 @@ impl Message { #[cfg(test)] mod tests { use super::*; - use crate::instruction::AccountMeta; - use std::str::FromStr; + use crate::{hash, instruction::AccountMeta}; + use std::collections::HashSet; #[test] fn test_message_unique_program_ids() { let program_id0 = Pubkey::default(); let program_ids = get_program_ids(&[ - Instruction::new(program_id0, &0, vec![]), - Instruction::new(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), ]); assert_eq!(program_ids, vec![program_id0]); } + #[test] + fn test_builtin_program_keys() { + let keys: HashSet = BUILTIN_PROGRAMS_KEYS.iter().copied().collect(); + assert_eq!(keys.len(), 10); + for k in keys { + let k = 
format!("{}", k); + assert!(k.ends_with("11111111111111111111111")); + } + } + + #[test] + fn test_builtin_program_keys_abi_freeze() { + // Once the feature is flipped on, we can't further modify + // BUILTIN_PROGRAMS_KEYS without the risk of breaking consensus. + let builtins = format!("{:?}", *BUILTIN_PROGRAMS_KEYS); + assert_eq!( + format!("{}", hash::hash(builtins.as_bytes())), + "ACqmMkYbo9eqK6QrRSrB3HLyR6uHhLf31SCfGUAJjiWj" + ); + } + #[test] fn test_message_unique_program_ids_not_adjacent() { let program_id0 = Pubkey::default(); let program_id1 = Pubkey::new_unique(); let program_ids = get_program_ids(&[ - Instruction::new(program_id0, &0, vec![]), - Instruction::new(program_id1, &0, vec![]), - Instruction::new(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id1, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), ]); assert_eq!(program_ids, vec![program_id0, program_id1]); } @@ -509,9 +564,9 @@ mod tests { let program_id0 = Pubkey::new_unique(); let program_id1 = Pubkey::default(); // Key less than program_id0 let program_ids = get_program_ids(&[ - Instruction::new(program_id0, &0, vec![]), - Instruction::new(program_id1, &0, vec![]), - Instruction::new(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), + Instruction::new_with_bincode(program_id1, &0, vec![]), + Instruction::new_with_bincode(program_id0, &0, vec![]), ]); assert_eq!(program_ids, vec![program_id0, program_id1]); } @@ -522,8 +577,8 @@ mod tests { let id0 = Pubkey::default(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]), ], None, ); @@ -535,7 +590,7 @@ mod tests { let program_id = 
Pubkey::default(); let id0 = Pubkey::default(); let keys = get_keys( - &[Instruction::new( + &[Instruction::new_with_bincode( program_id, &0, vec![AccountMeta::new(id0, true)], @@ -550,7 +605,7 @@ mod tests { let program_id = Pubkey::default(); let id0 = Pubkey::default(); let keys = get_keys( - &[Instruction::new( + &[Instruction::new_with_bincode( program_id, &0, vec![AccountMeta::new(id0, false)], @@ -566,8 +621,8 @@ mod tests { let id0 = Pubkey::default(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]), ], None, ); @@ -580,8 +635,12 @@ mod tests { let id0 = Pubkey::default(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id0, true)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id0, true)], + ), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]), ], None, ); @@ -596,8 +655,12 @@ mod tests { let id0 = Pubkey::default(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id0, false)], + ), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), ], None, ); @@ -613,8 +676,8 @@ mod tests { let id1 = Pubkey::default(); // Key less than id0 let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id1, false)]), + Instruction::new_with_bincode(program_id, &0, 
vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, false)]), ], None, ); @@ -628,9 +691,9 @@ mod tests { let id1 = Pubkey::new_unique(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id1, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]), ], None, ); @@ -644,8 +707,8 @@ mod tests { let id1 = Pubkey::new_unique(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id1, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, true)]), ], None, ); @@ -657,11 +720,11 @@ mod tests { fn test_message_signed_keys_len() { let program_id = Pubkey::default(); let id0 = Pubkey::default(); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]); let message = Message::new(&[ix], None); assert_eq!(message.header.num_required_signatures, 0); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]); let message = Message::new(&[ix], Some(&id0)); assert_eq!(message.header.num_required_signatures, 1); } @@ -675,10 +738,18 @@ mod tests { let id3 = Pubkey::new_unique(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id0, false)]), - Instruction::new(program_id, 
&0, vec![AccountMeta::new_readonly(id1, true)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id2, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id3, true)]), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id0, false)], + ), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id1, true)], + ), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id2, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id3, true)]), ], None, ); @@ -696,9 +767,9 @@ mod tests { let id1 = Pubkey::new_unique(); let message = Message::new( &[ - Instruction::new(program_id0, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id1, &0, vec![AccountMeta::new(id1, true)]), - Instruction::new(program_id0, &0, vec![AccountMeta::new(id1, false)]), + Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id1, &0, vec![AccountMeta::new(id1, true)]), + Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id1, false)]), ], Some(&id1), ); @@ -722,15 +793,15 @@ mod tests { let payer = Pubkey::new_unique(); let id0 = Pubkey::default(); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]); let message = Message::new(&[ix], Some(&payer)); assert_eq!(message.header.num_required_signatures, 1); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]); let message = Message::new(&[ix], Some(&payer)); assert_eq!(message.header.num_required_signatures, 2); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( program_id, &0, vec![AccountMeta::new(payer, true), AccountMeta::new(id0, true)], @@ -746,8 +817,16 @@ 
mod tests { let id1 = Pubkey::new_unique(); let keys = get_keys( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id1, true)]), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id0, false)], + ), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id1, true)], + ), ], None, ); @@ -764,8 +843,8 @@ mod tests { let id = Pubkey::new_unique(); let message = Message::new( &[ - Instruction::new(program_id0, &0, vec![AccountMeta::new(id, false)]), - Instruction::new(program_id1, &0, vec![AccountMeta::new(id, true)]), + Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id, false)]), + Instruction::new_with_bincode(program_id1, &0, vec![AccountMeta::new(id, true)]), ], Some(&id), ); @@ -793,12 +872,13 @@ mod tests { recent_blockhash: Hash::default(), instructions: vec![], }; - assert_eq!(message.is_writable(0), true); - assert_eq!(message.is_writable(1), false); - assert_eq!(message.is_writable(2), false); - assert_eq!(message.is_writable(3), true); - assert_eq!(message.is_writable(4), true); - assert_eq!(message.is_writable(5), false); + let demote_sysvar_write_locks = true; + assert_eq!(message.is_writable(0, demote_sysvar_write_locks), true); + assert_eq!(message.is_writable(1, demote_sysvar_write_locks), false); + assert_eq!(message.is_writable(2, demote_sysvar_write_locks), false); + assert_eq!(message.is_writable(3, demote_sysvar_write_locks), true); + assert_eq!(message.is_writable(4, demote_sysvar_write_locks), true); + assert_eq!(message.is_writable(5, demote_sysvar_write_locks), false); } #[test] @@ -810,15 +890,25 @@ mod tests { let id3 = Pubkey::new_unique(); let message = Message::new( &[ - Instruction::new(program_id, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new(id1, true)]), - Instruction::new(program_id, &0, 
vec![AccountMeta::new_readonly(id2, false)]), - Instruction::new(program_id, &0, vec![AccountMeta::new_readonly(id3, true)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, true)]), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id2, false)], + ), + Instruction::new_with_bincode( + program_id, + &0, + vec![AccountMeta::new_readonly(id3, true)], + ), ], Some(&id1), ); assert_eq!( - message.get_account_keys_by_lock_type(), + message.get_account_keys_by_lock_type( + true, // demote_sysvar_write_locks + ), (vec![&id1, &id0], vec![&id3, &id2, &program_id]) ); } @@ -833,14 +923,24 @@ mod tests { let id2 = Pubkey::new_unique(); let id3 = Pubkey::new_unique(); let instructions = vec![ - Instruction::new(program_id0, &0, vec![AccountMeta::new(id0, false)]), - Instruction::new(program_id0, &0, vec![AccountMeta::new(id1, true)]), - Instruction::new(program_id1, &0, vec![AccountMeta::new_readonly(id2, false)]), - Instruction::new(program_id1, &0, vec![AccountMeta::new_readonly(id3, true)]), + Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id0, false)]), + Instruction::new_with_bincode(program_id0, &0, vec![AccountMeta::new(id1, true)]), + Instruction::new_with_bincode( + program_id1, + &0, + vec![AccountMeta::new_readonly(id2, false)], + ), + Instruction::new_with_bincode( + program_id1, + &0, + vec![AccountMeta::new_readonly(id3, true)], + ), ]; let message = Message::new(&instructions, Some(&id1)); - let serialized = message.serialize_instructions(); + let serialized = message.serialize_instructions( + true, // demote_sysvar_write_locks + ); for (i, instruction) in instructions.iter().enumerate() { assert_eq!( Message::deserialize_instruction(i, &serialized).unwrap(), @@ -861,7 +961,9 @@ mod tests { ]; let message = Message::new(&instructions, Some(&id1)); - let serialized = 
message.serialize_instructions(); + let serialized = message.serialize_instructions( + true, // demote_sysvar_write_locks + ); assert_eq!( Message::deserialize_instruction(instructions.len(), &serialized).unwrap_err(), SanitizeError::IndexOutOfBounds, @@ -925,6 +1027,14 @@ mod tests { assert!(!message.is_non_loader_key(&loader2, 2)); } + #[test] + fn test_message_header_len_constant() { + assert_eq!( + bincode::serialized_size(&MessageHeader::default()).unwrap() as usize, + MESSAGE_HEADER_LENGTH + ); + } + #[test] fn test_message_hash() { // when this test fails, it's most likely due to a new serialized format of a message. @@ -956,12 +1066,4 @@ mod tests { Hash::from_str("CXRH7GHLieaQZRUjH1mpnNnUZQtU4V4RpJpAFgy77i3z").unwrap() ) } - - #[test] - fn test_message_header_len_constant() { - assert_eq!( - bincode::serialized_size(&MessageHeader::default()).unwrap() as usize, - MESSAGE_HEADER_LENGTH - ); - } } diff --git a/sdk/program/src/native_token.rs b/sdk/program/src/native_token.rs index 8e659818e2..837852b5af 100644 --- a/sdk/program/src/native_token.rs +++ b/sdk/program/src/native_token.rs @@ -1,20 +1,16 @@ -/// There are 10^9 lamports in one VLX +/// There are 10^9 lamports in one SOL pub const LAMPORTS_PER_VLX: u64 = 1_000_000_000; -/// Approximately convert fractional native tokens (lamports) into native tokens (VLX) +/// Approximately convert fractional native tokens (lamports) into native tokens (SOL) pub fn lamports_to_sol(lamports: u64) -> f64 { lamports as f64 / LAMPORTS_PER_VLX as f64 } -/// Approximately convert native tokens (VLX) into fractional native tokens (lamports) +/// Approximately convert native tokens (SOL) into fractional native tokens (lamports) pub fn sol_to_lamports(sol: f64) -> u64 { (sol * LAMPORTS_PER_VLX as f64) as u64 } -pub const fn sol_to_lamports_u64(sol: u64) -> u64 { - sol * LAMPORTS_PER_VLX -} - use std::fmt::{Debug, Display, Formatter, Result}; pub struct Sol(pub u64); diff --git a/sdk/program/src/program_error.rs 
b/sdk/program/src/program_error.rs index 5b9a7bb844..76ff5612e3 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -42,6 +42,10 @@ pub enum ProgramError { BorshIoError(String), #[error("An account does not have enough lamports to be rent-exempt")] AccountNotRentExempt, + #[error("Unsupported sysvar")] + UnsupportedSysvar, + #[error("Provided owner is not allowed")] + IllegalOwner, } pub trait PrintProgramError { @@ -78,6 +82,8 @@ impl PrintProgramError for ProgramError { Self::InvalidSeeds => msg!("Error: InvalidSeeds"), Self::BorshIoError(_) => msg!("Error: BorshIoError"), Self::AccountNotRentExempt => msg!("Error: AccountNotRentExempt"), + Self::UnsupportedSysvar => msg!("Error: UnsupportedSysvar"), + Self::IllegalOwner => msg!("Error: IllegalOwner"), } } } @@ -106,6 +112,8 @@ pub const MAX_SEED_LENGTH_EXCEEDED: u64 = to_builtin!(13); pub const INVALID_SEEDS: u64 = to_builtin!(14); pub const BORSH_IO_ERROR: u64 = to_builtin!(15); pub const ACCOUNT_NOT_RENT_EXEMPT: u64 = to_builtin!(16); +pub const UNSUPPORTED_SYSVAR: u64 = to_builtin!(17); +pub const ILLEGAL_OWNER: u64 = to_builtin!(18); impl From for u64 { fn from(error: ProgramError) -> Self { @@ -125,6 +133,8 @@ impl From for u64 { ProgramError::InvalidSeeds => INVALID_SEEDS, ProgramError::BorshIoError(_) => BORSH_IO_ERROR, ProgramError::AccountNotRentExempt => ACCOUNT_NOT_RENT_EXEMPT, + ProgramError::UnsupportedSysvar => UNSUPPORTED_SYSVAR, + ProgramError::IllegalOwner => ILLEGAL_OWNER, ProgramError::Custom(error) => { if error == 0 { @@ -153,7 +163,9 @@ impl From for ProgramError { ACCOUNT_BORROW_FAILED => ProgramError::AccountBorrowFailed, MAX_SEED_LENGTH_EXCEEDED => ProgramError::MaxSeedLengthExceeded, INVALID_SEEDS => ProgramError::InvalidSeeds, + UNSUPPORTED_SYSVAR => ProgramError::UnsupportedSysvar, CUSTOM_ZERO => ProgramError::Custom(0), + ILLEGAL_OWNER => ProgramError::IllegalOwner, _ => ProgramError::Custom(error as u32), } } @@ -179,6 +191,8 @@ impl TryFrom for 
ProgramError { Self::Error::MaxSeedLengthExceeded => Ok(Self::MaxSeedLengthExceeded), Self::Error::BorshIoError(err) => Ok(Self::BorshIoError(err)), Self::Error::AccountNotRentExempt => Ok(Self::AccountNotRentExempt), + Self::Error::UnsupportedSysvar => Ok(Self::UnsupportedSysvar), + Self::Error::IllegalOwner => Ok(Self::IllegalOwner), _ => Err(error), } } @@ -205,6 +219,8 @@ where ACCOUNT_BORROW_FAILED => InstructionError::AccountBorrowFailed, MAX_SEED_LENGTH_EXCEEDED => InstructionError::MaxSeedLengthExceeded, INVALID_SEEDS => InstructionError::InvalidSeeds, + UNSUPPORTED_SYSVAR => InstructionError::UnsupportedSysvar, + ILLEGAL_OWNER => InstructionError::IllegalOwner, _ => { // A valid custom error has no bits set in the upper 32 if error >> BUILTIN_BIT_SHIFT == 0 { @@ -222,6 +238,7 @@ impl From for ProgramError { match error { PubkeyError::MaxSeedLengthExceeded => ProgramError::MaxSeedLengthExceeded, PubkeyError::InvalidSeeds => ProgramError::InvalidSeeds, + PubkeyError::IllegalOwner => ProgramError::IllegalOwner, } } } diff --git a/sdk/program/src/program_stubs.rs b/sdk/program/src/program_stubs.rs index fcd63e6b6d..8aa67b6c78 100644 --- a/sdk/program/src/program_stubs.rs +++ b/sdk/program/src/program_stubs.rs @@ -2,7 +2,10 @@ #![cfg(not(target_arch = "bpf"))] -use crate::{account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction}; +use crate::{ + account_info::AccountInfo, entrypoint::ProgramResult, instruction::Instruction, + program_error::UNSUPPORTED_SYSVAR, +}; use std::sync::{Arc, RwLock}; lazy_static::lazy_static! 
{ @@ -31,6 +34,19 @@ pub trait SyscallStubs: Sync + Send { sol_log("SyscallStubs: sol_invoke_signed() not available"); Ok(()) } + + fn sol_get_clock_sysvar(&self, _var_addr: *mut u8) -> u64 { + UNSUPPORTED_SYSVAR + } + fn sol_get_epoch_schedule_sysvar(&self, _var_addr: *mut u8) -> u64 { + UNSUPPORTED_SYSVAR + } + fn sol_get_fees_sysvar(&self, _var_addr: *mut u8) -> u64 { + UNSUPPORTED_SYSVAR + } + fn sol_get_rent_sysvar(&self, _var_addr: *mut u8) -> u64 { + UNSUPPORTED_SYSVAR + } } struct DefaultSyscallStubs {} @@ -61,3 +77,22 @@ pub(crate) fn sol_invoke_signed( .unwrap() .sol_invoke_signed(instruction, account_infos, signers_seeds) } + +pub(crate) fn sol_get_clock_sysvar(var_addr: *mut u8) -> u64 { + SYSCALL_STUBS.read().unwrap().sol_get_clock_sysvar(var_addr) +} + +pub(crate) fn sol_get_epoch_schedule_sysvar(var_addr: *mut u8) -> u64 { + SYSCALL_STUBS + .read() + .unwrap() + .sol_get_epoch_schedule_sysvar(var_addr) +} + +pub(crate) fn sol_get_fees_sysvar(var_addr: *mut u8) -> u64 { + SYSCALL_STUBS.read().unwrap().sol_get_fees_sysvar(var_addr) +} + +pub(crate) fn sol_get_rent_sysvar(var_addr: *mut u8) -> u64 { + SYSCALL_STUBS.read().unwrap().sol_get_rent_sysvar(var_addr) +} diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 5175ba7f54..ecda337304 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -1,7 +1,15 @@ -use crate::{decode_error::DecodeError, hash::hashv}; +use crate::{ + bpf_loader, bpf_loader_deprecated, config, decode_error::DecodeError, feature, hash::hashv, + secp256k1_program, stake, system_program, sysvar, vote, +}; + use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use num_derive::{FromPrimitive, ToPrimitive}; -use std::{convert::TryFrom, fmt, mem, str::FromStr}; +use std::{ + convert::{Infallible, TryFrom}, + fmt, mem, + str::FromStr, +}; use thiserror::Error; /// Number of bytes in a pubkey @@ -13,6 +21,8 @@ pub const MAX_SEEDS: usize = 16; /// Maximum string length of a base58 encoded pubkey 
const MAX_BASE58_LEN: usize = 44; +const PDA_MARKER: &[u8; 21] = b"ProgramDerivedAddress"; + #[derive(Error, Debug, Serialize, Clone, PartialEq, FromPrimitive, ToPrimitive)] pub enum PubkeyError { /// Length of the seed is too long for address generation @@ -20,6 +30,8 @@ pub enum PubkeyError { MaxSeedLengthExceeded, #[error("Provided seeds do not result in a valid address")] InvalidSeeds, + #[error("Provided owner is not allowed")] + IllegalOwner, } impl DecodeError for PubkeyError { fn type_of() -> &'static str { @@ -63,7 +75,16 @@ pub enum ParsePubkeyError { WrongSize, #[error("Invalid Base58 string")] Invalid, + #[error("Infallible")] + Infallible, } + +impl From for ParsePubkeyError { + fn from(_: Infallible) -> Self { + Self::Infallible + } +} + impl DecodeError for ParsePubkeyError { fn type_of() -> &'static str { "ParsePubkeyError" @@ -88,6 +109,24 @@ impl FromStr for Pubkey { } } +impl TryFrom<&str> for Pubkey { + type Error = ParsePubkeyError; + fn try_from(s: &str) -> Result { + Pubkey::from_str(s) + } +} + +pub fn bytes_are_curve_point>(_bytes: T) -> bool { + #[cfg(not(target_arch = "bpf"))] + { + curve25519_dalek::edwards::CompressedEdwardsY::from_slice(_bytes.as_ref()) + .decompress() + .is_some() + } + #[cfg(target_arch = "bpf")] + unimplemented!(); +} + impl Pubkey { pub fn new(pubkey_vec: &[u8]) -> Self { Self( @@ -127,8 +166,16 @@ impl Pubkey { return Err(PubkeyError::MaxSeedLengthExceeded); } + let owner = owner.as_ref(); + if owner.len() >= PDA_MARKER.len() { + let slice = &owner[owner.len() - PDA_MARKER.len()..]; + if slice == PDA_MARKER { + return Err(PubkeyError::IllegalOwner); + } + } + Ok(Pubkey::new( - hashv(&[base.as_ref(), seed.as_ref(), owner.as_ref()]).as_ref(), + hashv(&[base.as_ref(), seed.as_ref(), owner]).as_ref(), )) } @@ -168,6 +215,10 @@ impl Pubkey { } } + if program_id.is_native_program_id() { + return Err(PubkeyError::IllegalOwner); + } + // Perform the calculation inline, calling this from within a program is // not 
supported #[cfg(not(target_arch = "bpf"))] @@ -176,13 +227,10 @@ impl Pubkey { for seed in seeds.iter() { hasher.hash(seed); } - hasher.hashv(&[program_id.as_ref(), "ProgramDerivedAddress".as_ref()]); + hasher.hashv(&[program_id.as_ref(), PDA_MARKER]); let hash = hasher.result(); - if curve25519_dalek::edwards::CompressedEdwardsY::from_slice(hash.as_ref()) - .decompress() - .is_some() - { + if bytes_are_curve_point(hash) { return Err(PubkeyError::InvalidSeeds); } @@ -260,9 +308,10 @@ impl Pubkey { { let mut seeds_with_bump = seeds.to_vec(); seeds_with_bump.push(&bump_seed); - if let Ok(address) = Self::create_program_address(&seeds_with_bump, program_id) - { - return Some((address, bump_seed[0])); + match Self::create_program_address(&seeds_with_bump, program_id) { + Ok(address) => return Some((address, bump_seed[0])), + Err(PubkeyError::InvalidSeeds) => (), + _ => break, } } bump_seed[0] -= 1; @@ -303,6 +352,10 @@ impl Pubkey { self.0 } + pub fn is_on_curve(&self) -> bool { + bytes_are_curve_point(self) + } + /// Log a `Pubkey` from a program pub fn log(&self) { #[cfg(target_arch = "bpf")] @@ -316,6 +369,22 @@ impl Pubkey { #[cfg(not(target_arch = "bpf"))] crate::program_stubs::sol_log(&self.to_string()); } + + pub fn is_native_program_id(&self) -> bool { + let all_program_ids = [ + bpf_loader::id(), + bpf_loader_deprecated::id(), + feature::id(), + config::program::id(), + stake::program::id(), + stake::config::id(), + vote::program::id(), + secp256k1_program::id(), + system_program::id(), + sysvar::id(), + ]; + all_program_ids.contains(self) + } } impl AsRef<[u8]> for Pubkey { @@ -446,7 +515,7 @@ mod tests { fn test_create_program_address() { let exceeded_seed = &[127; MAX_SEED_LEN + 1]; let max_seed = &[0; MAX_SEED_LEN]; - let program_id = Pubkey::from_str("BPFLoader1111111111111111111111111111111111").unwrap(); + let program_id = Pubkey::from_str("BPFLoaderUpgradeab1e11111111111111111111111").unwrap(); let public_key = 
Pubkey::from_str("SeedPubey1111111111111111111111111111111111").unwrap(); assert_eq!( @@ -460,25 +529,25 @@ mod tests { assert!(Pubkey::create_program_address(&[max_seed], &program_id).is_ok()); assert_eq!( Pubkey::create_program_address(&[b"", &[1]], &program_id), - Ok("3gF2KMe9KiC6FNVBmfg9i267aMPvK37FewCip4eGBFcT" + Ok("BwqrghZA2htAcqq8dzP1WDAhTXYTYWj7CHxF5j7TDBAe" .parse() .unwrap()) ); assert_eq!( - Pubkey::create_program_address(&["☉".as_ref()], &program_id), - Ok("7ytmC1nT1xY4RfxCV2ZgyA7UakC93do5ZdyhdF3EtPj7" + Pubkey::create_program_address(&["☉".as_ref(), &[0]], &program_id), + Ok("13yWmRpaTR4r5nAktwLqMpRNr28tnVUZw26rTvPSSB19" .parse() .unwrap()) ); assert_eq!( Pubkey::create_program_address(&[b"Talking", b"Squirrels"], &program_id), - Ok("HwRVBufQ4haG5XSgpspwKtNd3PC9GM9m1196uJW36vds" + Ok("2fnQrngrQT4SeLcdToJAD96phoEjNL2man2kfRLCASVk" .parse() .unwrap()) ); assert_eq!( - Pubkey::create_program_address(&[public_key.as_ref()], &program_id), - Ok("GUs5qLUfsEHkcMB9T38vjr18ypEhRuNWiePW2LoK4E3K" + Pubkey::create_program_address(&[public_key.as_ref(), &[1]], &program_id), + Ok("976ymqVnfE32QFe6NfGDctSvVa36LWnvYxhU6G2232YL" .parse() .unwrap()) ); @@ -525,4 +594,18 @@ mod tests { ); } } + + #[test] + fn test_is_native_program_id() { + assert!(bpf_loader::id().is_native_program_id()); + assert!(bpf_loader_deprecated::id().is_native_program_id()); + assert!(config::program::id().is_native_program_id()); + assert!(feature::id().is_native_program_id()); + assert!(secp256k1_program::id().is_native_program_id()); + assert!(stake::program::id().is_native_program_id()); + assert!(stake::config::id().is_native_program_id()); + assert!(system_program::id().is_native_program_id()); + assert!(sysvar::id().is_native_program_id()); + assert!(vote::program::id().is_native_program_id()); + } } diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs index ffdaafd62c..0245fe0c16 100644 --- a/sdk/program/src/rent.rs +++ b/sdk/program/src/rent.rs @@ -14,8 +14,8 @@ pub struct 
Rent { } /// default rental rate in lamports/byte-year, based on: -/// 10^9 lamports per VLX -/// $1 per VLX +/// 10^9 lamports per SOL +/// $1 per SOL /// $0.01 per megabyte day /// $3.65 per megabyte year pub const DEFAULT_LAMPORTS_PER_BYTE_YEAR: u64 = 1_000_000_000 / 100 * 365 / (1024 * 1024); @@ -142,8 +142,7 @@ mod tests { use crate::{clock::*, sysvar::Sysvar}; const SECONDS_PER_YEAR: f64 = 365.242_199 * 24.0 * 60.0 * 60.0; - const SLOTS_PER_YEAR: f64 = - SECONDS_PER_YEAR / (DEFAULT_TICKS_PER_SLOT as f64 / DEFAULT_TICKS_PER_SECOND as f64); + const SLOTS_PER_YEAR: f64 = SECONDS_PER_YEAR / DEFAULT_S_PER_SLOT; let rent = Rent::default(); panic!( diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 0eabf419b2..5294e9edb5 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -13,7 +13,7 @@ use thiserror::Error; pub enum SystemError { #[error("an account with the same address already exists")] AccountAlreadyInUse, - #[error("account does not have enough VLX to perform the operation")] + #[error("account does not have enough SOL to perform the operation")] ResultWithNegativeLamports, #[error("cannot assign account to this program id")] InvalidProgramId, @@ -230,7 +230,7 @@ pub fn create_account( AccountMeta::new(*from_pubkey, true), AccountMeta::new(*to_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::CreateAccount { lamports, @@ -258,7 +258,7 @@ pub fn create_account_with_seed( AccountMeta::new_readonly(*base, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::CreateAccountWithSeed { base: *base, @@ -273,7 +273,7 @@ pub fn create_account_with_seed( pub fn assign(pubkey: &Pubkey, owner: &Pubkey) -> Instruction { let account_metas = vec![AccountMeta::new(*pubkey, true)]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), 
&SystemInstruction::Assign { owner: *owner }, account_metas, @@ -290,7 +290,7 @@ pub fn assign_with_seed( AccountMeta::new(*address, false), AccountMeta::new_readonly(*base, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::AssignWithSeed { base: *base, @@ -306,7 +306,7 @@ pub fn transfer(from_pubkey: &Pubkey, to_pubkey: &Pubkey, lamports: u64) -> Inst AccountMeta::new(*from_pubkey, true), AccountMeta::new(*to_pubkey, false), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::Transfer { lamports }, account_metas, @@ -326,7 +326,7 @@ pub fn transfer_with_seed( AccountMeta::new_readonly(*from_base, true), AccountMeta::new(*to_pubkey, false), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::TransferWithSeed { lamports, @@ -339,7 +339,7 @@ pub fn transfer_with_seed( pub fn allocate(pubkey: &Pubkey, space: u64) -> Instruction { let account_metas = vec![AccountMeta::new(*pubkey, true)]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::Allocate { space }, account_metas, @@ -357,7 +357,7 @@ pub fn allocate_with_seed( AccountMeta::new(*address, false), AccountMeta::new_readonly(*base, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::AllocateWithSeed { base: *base, @@ -395,7 +395,7 @@ pub fn create_nonce_account_with_seed( nonce::State::size() as u64, &system_program::id(), ), - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::InitializeNonceAccount(*authority), vec![ @@ -421,7 +421,7 @@ pub fn create_nonce_account( nonce::State::size() as u64, &system_program::id(), ), - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::InitializeNonceAccount(*authority), vec![ @@ -439,7 +439,7 @@ pub fn advance_nonce_account(nonce_pubkey: &Pubkey, 
authorized_pubkey: &Pubkey) AccountMeta::new_readonly(recent_blockhashes::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::AdvanceNonceAccount, account_metas, @@ -459,7 +459,7 @@ pub fn withdraw_nonce_account( AccountMeta::new_readonly(rent::id(), false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::WithdrawNonceAccount(lamports), account_metas, @@ -475,7 +475,7 @@ pub fn authorize_nonce_account( AccountMeta::new(*nonce_pubkey, false), AccountMeta::new_readonly(*authorized_pubkey, true), ]; - Instruction::new( + Instruction::new_with_bincode( system_program::id(), &SystemInstruction::AuthorizeNonceAccount(*new_authority), account_metas, diff --git a/sdk/program/src/sysvar/clock.rs b/sdk/program/src/sysvar/clock.rs index a6af0573bb..89566a0574 100644 --- a/sdk/program/src/sysvar/clock.rs +++ b/sdk/program/src/sysvar/clock.rs @@ -2,8 +2,10 @@ //! pub use crate::clock::Clock; -use crate::sysvar::Sysvar; +use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; crate::declare_sysvar_id!("SysvarC1ock11111111111111111111111111111111", Clock); -impl Sysvar for Clock {} +impl Sysvar for Clock { + impl_sysvar_get!(sol_get_clock_sysvar); +} diff --git a/sdk/program/src/sysvar/epoch_schedule.rs b/sdk/program/src/sysvar/epoch_schedule.rs index 77b27ec9ce..502d477d9e 100644 --- a/sdk/program/src/sysvar/epoch_schedule.rs +++ b/sdk/program/src/sysvar/epoch_schedule.rs @@ -1,8 +1,11 @@ //! This account contains the current cluster rent //! 
pub use crate::epoch_schedule::EpochSchedule; -use crate::sysvar::Sysvar; + +use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; crate::declare_sysvar_id!("SysvarEpochSchedu1e111111111111111111111111", EpochSchedule); -impl Sysvar for EpochSchedule {} +impl Sysvar for EpochSchedule { + impl_sysvar_get!(sol_get_epoch_schedule_sysvar); +} diff --git a/sdk/program/src/sysvar/fees.rs b/sdk/program/src/sysvar/fees.rs index 3c09281e4b..7d6f14d496 100644 --- a/sdk/program/src/sysvar/fees.rs +++ b/sdk/program/src/sysvar/fees.rs @@ -1,11 +1,13 @@ //! This account contains the current cluster fees //! -use crate::{fee_calculator::FeeCalculator, sysvar::Sysvar}; +use crate::{ + fee_calculator::FeeCalculator, impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar, +}; crate::declare_sysvar_id!("SysvarFees111111111111111111111111111111111", Fees); #[repr(C)] -#[derive(Serialize, Deserialize, Debug, Default)] +#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)] pub struct Fees { pub fee_calculator: FeeCalculator, } @@ -17,4 +19,6 @@ impl Fees { } } -impl Sysvar for Fees {} +impl Sysvar for Fees { + impl_sysvar_get!(sol_get_fees_sysvar); +} diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs index 7aad1843bd..400e60eb61 100644 --- a/sdk/program/src/sysvar/instructions.rs +++ b/sdk/program/src/sysvar/instructions.rs @@ -1,13 +1,13 @@ //! 
This account contains the serialized transaction instructions -use crate::{instruction::Instruction, sanitize::SanitizeError, sysvar::Sysvar}; +use crate::{instruction::Instruction, sanitize::SanitizeError}; -pub type Instructions = Vec; +// Instructions Sysvar, dummy type, use the associated helpers instead of the Sysvar trait +pub struct Instructions(); crate::declare_sysvar_id!("Sysvar1nstructions1111111111111111111111111", Instructions); -impl Sysvar for Instructions {} - +/// Load the current instruction's index from the Instructions Sysvar data pub fn load_current_index(data: &[u8]) -> u16 { let mut instr_fixed_data = [0u8; 2]; let len = data.len(); @@ -15,11 +15,13 @@ pub fn load_current_index(data: &[u8]) -> u16 { u16::from_le_bytes(instr_fixed_data) } +/// Store the current instruction's index in the Instructions Sysvar data pub fn store_current_index(data: &mut [u8], instruction_index: u16) { let last_index = data.len() - 2; data[last_index..last_index + 2].copy_from_slice(&instruction_index.to_le_bytes()); } +/// Load an instruction at the specified index pub fn load_instruction_at(index: usize, data: &[u8]) -> Result { crate::message::Message::deserialize_instruction(index, data) } diff --git a/sdk/program/src/sysvar/mod.rs b/sdk/program/src/sysvar/mod.rs index d748d045d5..f0309475e7 100644 --- a/sdk/program/src/sysvar/mod.rs +++ b/sdk/program/src/sysvar/mod.rs @@ -49,14 +49,14 @@ macro_rules! 
declare_sysvar_id( ) ); -// owner pubkey for sysvar accounts +// Owner pubkey for sysvar accounts crate::declare_id!("Sysvar1111111111111111111111111111111111111"); pub trait SysvarId { fn check_id(pubkey: &Pubkey) -> bool; } -// utilities for moving into and out of Accounts +// Sysvar utilities pub trait Sysvar: SysvarId + Default + Sized + serde::Serialize + serde::de::DeserializeOwned { @@ -72,6 +72,34 @@ pub trait Sysvar: fn to_account_info(&self, account_info: &mut AccountInfo) -> Option<()> { bincode::serialize_into(&mut account_info.data.borrow_mut()[..], self).ok() } + fn get() -> Result { + Err(ProgramError::UnsupportedSysvar) + } +} + +#[macro_export] +macro_rules! impl_sysvar_get { + ($syscall_name:ident) => { + fn get() -> Result { + let mut var = Self::default(); + let var_addr = &mut var as *mut _ as *mut u8; + + #[cfg(target_arch = "bpf")] + let result = unsafe { + extern "C" { + fn $syscall_name(var_addr: *mut u8) -> u64; + } + $syscall_name(var_addr) + }; + #[cfg(not(target_arch = "bpf"))] + let result = crate::program_stubs::$syscall_name(var_addr); + + match result { + crate::entrypoint::SUCCESS => Ok(var), + e => Err(e.into()), + } + } + }; } #[cfg(test)] diff --git a/sdk/program/src/sysvar/rent.rs b/sdk/program/src/sysvar/rent.rs index 7ae3cd209f..13e7f32367 100644 --- a/sdk/program/src/sysvar/rent.rs +++ b/sdk/program/src/sysvar/rent.rs @@ -2,8 +2,10 @@ //! pub use crate::rent::Rent; -use crate::sysvar::Sysvar; +use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; crate::declare_sysvar_id!("SysvarRent111111111111111111111111111111111", Rent); -impl Sysvar for Rent {} +impl Sysvar for Rent { + impl_sysvar_get!(sol_get_rent_sysvar); +} diff --git a/sdk/program/src/sysvar/slot_hashes.rs b/sdk/program/src/sysvar/slot_hashes.rs index 71865d04d8..d10cb57d8a 100644 --- a/sdk/program/src/sysvar/slot_hashes.rs +++ b/sdk/program/src/sysvar/slot_hashes.rs @@ -4,7 +4,7 @@ //! 
pub use crate::slot_hashes::SlotHashes; -use crate::sysvar::Sysvar; +use crate::{account_info::AccountInfo, program_error::ProgramError, sysvar::Sysvar}; crate::declare_sysvar_id!("SysvarS1otHashes111111111111111111111111111", SlotHashes); @@ -14,6 +14,10 @@ impl Sysvar for SlotHashes { // hard-coded so that we don't have to construct an empty 20_488 // golden, update if MAX_ENTRIES changes } + fn from_account_info(_account_info: &AccountInfo) -> Result { + // This sysvar is too large to bincode::deserialize in-program + Err(ProgramError::UnsupportedSysvar) + } } #[cfg(test)] diff --git a/sdk/program/src/sysvar/slot_history.rs b/sdk/program/src/sysvar/slot_history.rs index 7aafa02d8a..fae51b2f26 100644 --- a/sdk/program/src/sysvar/slot_history.rs +++ b/sdk/program/src/sysvar/slot_history.rs @@ -1,9 +1,11 @@ //! named accounts for synthesized data accounts for bank state, etc. //! //! this account carries a bitvector of slots present over the past -//! epoch +//! epoch //! -pub use crate::slot_history::SlotHistory; +pub use crate::{ + account_info::AccountInfo, program_error::ProgramError, slot_history::SlotHistory, +}; use crate::sysvar::Sysvar; @@ -15,6 +17,10 @@ impl Sysvar for SlotHistory { // hard-coded so that we don't have to construct an empty 131_097 // golden, update if MAX_ENTRIES changes } + fn from_account_info(_account_info: &AccountInfo) -> Result { + // This sysvar is too large to bincode::deserialize in-program + Err(ProgramError::UnsupportedSysvar) + } } #[cfg(test)] diff --git a/sdk/src/account.rs b/sdk/src/account.rs index f28cfbeccd..b6665d9cf9 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -3,7 +3,7 @@ use crate::{ pubkey::Pubkey, }; use solana_program::{account_info::AccountInfo, sysvar::Sysvar}; -use std::{cell::RefCell, cmp, fmt, rc::Rc}; +use std::{cell::Ref, cell::RefCell, cmp, fmt, rc::Rc}; /// An Account with data that is stored on chain #[repr(C)] @@ -24,72 +24,361 @@ pub struct Account { pub rent_epoch: Epoch, } +/// An 
Account with data that is stored on chain +/// This will become a new in-memory representation of the 'Account' struct data. +/// The existing 'Account' structure cannot easily change due to downstream projects. +/// This struct will shortly rely on something like the ReadableAccount trait for access to the fields. +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Default, AbiExample)] +pub struct AccountSharedData { + /// lamports in the account + pub lamports: u64, + /// data held in this account + #[serde(with = "serde_bytes")] + pub data: Vec, // will be: Arc>, + /// the program that owns this account. If executable, the program that loads this account. + pub owner: Pubkey, + /// this account's data contains a loaded program (and is now read-only) + pub executable: bool, + /// the epoch at which this account will next owe rent + pub rent_epoch: Epoch, +} + +/// Compares two ReadableAccounts +/// +/// Returns true if accounts are essentially equivalent as in all fields are equivalent. 
+pub fn accounts_equal(me: &T, other: &U) -> bool { + me.lamports() == other.lamports() + && me.data() == other.data() + && me.owner() == other.owner() + && me.executable() == other.executable() + && me.rent_epoch() == other.rent_epoch() +} + +impl From for Account { + fn from(other: AccountSharedData) -> Self { + Self { + lamports: other.lamports, + data: other.data, + owner: other.owner, + executable: other.executable, + rent_epoch: other.rent_epoch, + } + } +} + +impl From for AccountSharedData { + fn from(other: Account) -> Self { + Self { + lamports: other.lamports, + data: other.data, + owner: other.owner, + executable: other.executable, + rent_epoch: other.rent_epoch, + } + } +} + +pub trait WritableAccount: ReadableAccount { + fn set_lamports(&mut self, lamports: u64); + fn data_as_mut_slice(&mut self) -> &mut [u8]; + fn set_owner(&mut self, owner: Pubkey); + fn set_executable(&mut self, executable: bool); + fn set_rent_epoch(&mut self, epoch: Epoch); + fn create( + lamports: u64, + data: Vec, + owner: Pubkey, + executable: bool, + rent_epoch: Epoch, + ) -> Self; +} + +pub trait ReadableAccount: Sized { + fn lamports(&self) -> u64; + fn data(&self) -> &Vec; + fn owner(&self) -> &Pubkey; + fn executable(&self) -> bool; + fn rent_epoch(&self) -> Epoch; +} + +impl ReadableAccount for Account { + fn lamports(&self) -> u64 { + self.lamports + } + fn data(&self) -> &Vec { + &self.data + } + fn owner(&self) -> &Pubkey { + &self.owner + } + fn executable(&self) -> bool { + self.executable + } + fn rent_epoch(&self) -> Epoch { + self.rent_epoch + } +} + +impl WritableAccount for Account { + fn set_lamports(&mut self, lamports: u64) { + self.lamports = lamports; + } + fn data_as_mut_slice(&mut self) -> &mut [u8] { + &mut self.data + } + fn set_owner(&mut self, owner: Pubkey) { + self.owner = owner; + } + fn set_executable(&mut self, executable: bool) { + self.executable = executable; + } + fn set_rent_epoch(&mut self, epoch: Epoch) { + self.rent_epoch = epoch; + } + 
fn create( + lamports: u64, + data: Vec, + owner: Pubkey, + executable: bool, + rent_epoch: Epoch, + ) -> Self { + Account { + lamports, + data, + owner, + executable, + rent_epoch, + } + } +} + +impl WritableAccount for AccountSharedData { + fn set_lamports(&mut self, lamports: u64) { + self.lamports = lamports; + } + fn data_as_mut_slice(&mut self) -> &mut [u8] { + &mut self.data + } + fn set_owner(&mut self, owner: Pubkey) { + self.owner = owner; + } + fn set_executable(&mut self, executable: bool) { + self.executable = executable; + } + fn set_rent_epoch(&mut self, epoch: Epoch) { + self.rent_epoch = epoch; + } + fn create( + lamports: u64, + data: Vec, + owner: Pubkey, + executable: bool, + rent_epoch: Epoch, + ) -> Self { + AccountSharedData { + lamports, + data, + owner, + executable, + rent_epoch, + } + } +} + +impl ReadableAccount for AccountSharedData { + fn lamports(&self) -> u64 { + self.lamports + } + fn data(&self) -> &Vec { + &self.data + } + fn owner(&self) -> &Pubkey { + &self.owner + } + fn executable(&self) -> bool { + self.executable + } + fn rent_epoch(&self) -> Epoch { + self.rent_epoch + } +} + +impl ReadableAccount for Ref<'_, AccountSharedData> { + fn lamports(&self) -> u64 { + self.lamports + } + fn data(&self) -> &Vec { + &self.data + } + fn owner(&self) -> &Pubkey { + &self.owner + } + fn executable(&self) -> bool { + self.executable + } + fn rent_epoch(&self) -> Epoch { + self.rent_epoch + } +} + +impl ReadableAccount for Ref<'_, Account> { + fn lamports(&self) -> u64 { + self.lamports + } + fn data(&self) -> &Vec { + &self.data + } + fn owner(&self) -> &Pubkey { + &self.owner + } + fn executable(&self) -> bool { + self.executable + } + fn rent_epoch(&self) -> Epoch { + self.rent_epoch + } +} + +fn debug_fmt(item: &T, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let data_len = cmp::min(64, item.data().len()); + let data_str = if data_len > 0 { + format!(" data: {}", hex::encode(item.data()[..data_len].to_vec())) + } else { + 
"".to_string() + }; + write!( + f, + "Account {{ lamports: {} data.len: {} owner: {} executable: {} rent_epoch: {}{} }}", + item.lamports(), + item.data().len(), + item.owner(), + item.executable(), + item.rent_epoch(), + data_str, + ) +} + impl fmt::Debug for Account { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let data_len = cmp::min(64, self.data.len()); - let data_str = if data_len > 0 { - format!(" data: {}", hex::encode(self.data[..data_len].to_vec())) - } else { - "".to_string() - }; - write!( - f, - "Account {{ lamports: {} data.len: {} owner: {} executable: {} rent_epoch: {}{} }}", - self.lamports, - self.data.len(), - self.owner, - self.executable, - self.rent_epoch, - data_str, - ) + debug_fmt(self, f) } } +impl fmt::Debug for AccountSharedData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + debug_fmt(self, f) + } +} + +fn shared_new(lamports: u64, space: usize, owner: &Pubkey) -> T { + T::create( + lamports, + vec![0u8; space], + *owner, + bool::default(), + Epoch::default(), + ) +} + +fn shared_new_ref( + lamports: u64, + space: usize, + owner: &Pubkey, +) -> Rc> { + Rc::new(RefCell::new(shared_new::(lamports, space, owner))) +} + +fn shared_new_data( + lamports: u64, + state: &T, + owner: &Pubkey, +) -> Result { + let data = bincode::serialize(state)?; + Ok(U::create( + lamports, + data, + *owner, + bool::default(), + Epoch::default(), + )) +} +fn shared_new_ref_data( + lamports: u64, + state: &T, + owner: &Pubkey, +) -> Result, bincode::Error> { + Ok(RefCell::new(shared_new_data::( + lamports, state, owner, + )?)) +} + +fn shared_new_data_with_space( + lamports: u64, + state: &T, + space: usize, + owner: &Pubkey, +) -> Result { + let mut account = shared_new::(lamports, space, owner); + + shared_serialize_data(&mut account, state)?; + + Ok(account) +} +fn shared_new_ref_data_with_space( + lamports: u64, + state: &T, + space: usize, + owner: &Pubkey, +) -> Result, bincode::Error> { + 
Ok(RefCell::new(shared_new_data_with_space::( + lamports, state, space, owner, + )?)) +} + +fn shared_deserialize_data( + account: &U, +) -> Result { + bincode::deserialize(account.data()) +} + +fn shared_serialize_data( + account: &mut U, + state: &T, +) -> Result<(), bincode::Error> { + if bincode::serialized_size(state)? > account.data().len() as u64 { + return Err(Box::new(bincode::ErrorKind::SizeLimit)); + } + bincode::serialize_into(&mut account.data_as_mut_slice(), state) +} + impl Account { pub fn new(lamports: u64, space: usize, owner: &Pubkey) -> Self { - Self { - lamports, - data: vec![0u8; space], - owner: *owner, - ..Self::default() - } + shared_new(lamports, space, owner) } pub fn new_ref(lamports: u64, space: usize, owner: &Pubkey) -> Rc> { - Rc::new(RefCell::new(Self::new(lamports, space, owner))) + shared_new_ref(lamports, space, owner) } - pub fn new_data( lamports: u64, state: &T, owner: &Pubkey, ) -> Result { - let data = bincode::serialize(state)?; - Ok(Self { - lamports, - data, - owner: *owner, - ..Self::default() - }) + shared_new_data(lamports, state, owner) } pub fn new_ref_data( lamports: u64, state: &T, owner: &Pubkey, ) -> Result, bincode::Error> { - Ok(RefCell::new(Self::new_data(lamports, state, owner)?)) + shared_new_ref_data(lamports, state, owner) } - pub fn new_data_with_space( lamports: u64, state: &T, space: usize, owner: &Pubkey, ) -> Result { - let mut account = Self::new(lamports, space, owner); - - account.serialize_data(state)?; - - Ok(account) + shared_new_data_with_space(lamports, state, space, owner) } pub fn new_ref_data_with_space( lamports: u64, @@ -97,28 +386,61 @@ impl Account { space: usize, owner: &Pubkey, ) -> Result, bincode::Error> { - Ok(RefCell::new(Self::new_data_with_space( - lamports, state, space, owner, - )?)) + shared_new_ref_data_with_space(lamports, state, space, owner) } - pub fn deserialize_data(&self) -> Result { - bincode::deserialize(&self.data) + shared_deserialize_data(self) } - pub fn 
serialize_data(&mut self, state: &T) -> Result<(), bincode::Error> { - if bincode::serialized_size(state)? > self.data.len() as u64 { - return Err(Box::new(bincode::ErrorKind::SizeLimit)); - } - bincode::serialize_into(&mut self.data[..], state) + shared_serialize_data(self, state) } } -// AccountSharedData stub for forwards compatibility with the v1.6 release line -pub struct AccountSharedData {} impl AccountSharedData { - pub fn new_ref(lamports: u64, space: usize, owner: &Pubkey) -> Rc> { - Rc::new(RefCell::new(Account::new(lamports, space, owner))) + pub fn set_data(&mut self, data: Vec) { + self.data = data; + } + pub fn new(lamports: u64, space: usize, owner: &Pubkey) -> Self { + shared_new(lamports, space, owner) + } + pub fn new_ref(lamports: u64, space: usize, owner: &Pubkey) -> Rc> { + shared_new_ref(lamports, space, owner) + } + pub fn new_data( + lamports: u64, + state: &T, + owner: &Pubkey, + ) -> Result { + shared_new_data(lamports, state, owner) + } + pub fn new_ref_data( + lamports: u64, + state: &T, + owner: &Pubkey, + ) -> Result, bincode::Error> { + shared_new_ref_data(lamports, state, owner) + } + pub fn new_data_with_space( + lamports: u64, + state: &T, + space: usize, + owner: &Pubkey, + ) -> Result { + shared_new_data_with_space(lamports, state, space, owner) + } + pub fn new_ref_data_with_space( + lamports: u64, + state: &T, + space: usize, + owner: &Pubkey, + ) -> Result, bincode::Error> { + shared_new_ref_data_with_space(lamports, state, space, owner) + } + pub fn deserialize_data(&self) -> Result { + shared_deserialize_data(self) + } + pub fn serialize_data(&mut self, state: &T) -> Result<(), bincode::Error> { + shared_serialize_data(self, state) } } @@ -140,7 +462,7 @@ pub fn create_account_with_fields( ) -> Account { let data_len = S::size_of().max(bincode::serialized_size(sysvar).unwrap() as usize); let mut account = Account::new(lamports, data_len, &solana_program::sysvar::id()); - to_account::(sysvar, &mut account).unwrap(); + 
to_account::(sysvar, &mut account).unwrap(); account.rent_epoch = rent_epoch; account } @@ -149,14 +471,40 @@ pub fn create_account_for_test(sysvar: &S) -> Account { create_account_with_fields(sysvar, DUMMY_INHERITABLE_ACCOUNT_FIELDS) } +/// Create an `Account` from a `Sysvar`. +#[deprecated( + since = "1.5.17", + note = "Please use `create_account_shared_data_for_test` instead" +)] +pub fn create_account_shared_data(sysvar: &S, lamports: u64) -> AccountSharedData { + AccountSharedData::from(create_account_with_fields( + sysvar, + (lamports, INITIAL_RENT_EPOCH), + )) +} + +pub fn create_account_shared_data_with_fields( + sysvar: &S, + fields: InheritableAccountFields, +) -> AccountSharedData { + AccountSharedData::from(create_account_with_fields(sysvar, fields)) +} + +pub fn create_account_shared_data_for_test(sysvar: &S) -> AccountSharedData { + AccountSharedData::from(create_account_with_fields( + sysvar, + DUMMY_INHERITABLE_ACCOUNT_FIELDS, + )) +} + /// Create a `Sysvar` from an `Account`'s data. -pub fn from_account(account: &Account) -> Option { - bincode::deserialize(&account.data).ok() +pub fn from_account(account: &T) -> Option { + bincode::deserialize(account.data()).ok() } /// Serialize a `Sysvar` into an `Account`'s data. -pub fn to_account(sysvar: &S, account: &mut Account) -> Option<()> { - bincode::serialize_into(&mut account.data[..], sysvar).ok() +pub fn to_account(sysvar: &S, account: &mut T) -> Option<()> { + bincode::serialize_into(account.data_as_mut_slice(), sysvar).ok() } /// Return the information required to construct an `AccountInfo`. 
Used by the @@ -173,11 +521,6 @@ impl solana_program::account_info::Account for Account { } } -/// Create `AccountInfo`s -pub fn create_account_infos(accounts: &mut [(Pubkey, Account)]) -> Vec { - accounts.iter_mut().map(Into::into).collect() -} - /// Create `AccountInfo`s pub fn create_is_signer_account_infos<'a>( accounts: &'a mut [(&'a Pubkey, bool, &'a mut Account)], @@ -198,3 +541,303 @@ pub fn create_is_signer_account_infos<'a>( }) .collect() } + +#[cfg(test)] +pub mod tests { + use super::*; + + fn make_two_accounts(key: &Pubkey) -> (Account, AccountSharedData) { + let mut account1 = Account::new(1, 2, &key); + account1.executable = true; + account1.rent_epoch = 4; + let mut account2 = AccountSharedData::new(1, 2, key); + account2.executable = true; + account2.rent_epoch = 4; + assert!(accounts_equal(&account1, &account2)); + (account1, account2) + } + + #[test] + fn test_account_data_set_data() { + let key = Pubkey::new_unique(); + let (_, mut account) = make_two_accounts(&key); + assert_eq!(account.data(), &vec![0, 0]); + account.set_data(vec![1, 2]); + assert_eq!(account.data(), &vec![1, 2]); + account.set_data(vec![]); + assert_eq!(account.data().len(), 0); + } + + #[test] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: Io(Kind(UnexpectedEof))" + )] + fn test_account_deserialize() { + let key = Pubkey::new_unique(); + let (account1, _account2) = make_two_accounts(&key); + account1.deserialize_data::().unwrap(); + } + + #[test] + #[should_panic(expected = "called `Result::unwrap()` on an `Err` value: SizeLimit")] + fn test_account_serialize() { + let key = Pubkey::new_unique(); + let (mut account1, _account2) = make_two_accounts(&key); + account1.serialize_data(&"hello world").unwrap(); + } + + #[test] + #[should_panic( + expected = "called `Result::unwrap()` on an `Err` value: Io(Kind(UnexpectedEof))" + )] + fn test_account_shared_data_deserialize() { + let key = Pubkey::new_unique(); + let (_account1, account2) = 
make_two_accounts(&key); + account2.deserialize_data::().unwrap(); + } + + #[test] + #[should_panic(expected = "called `Result::unwrap()` on an `Err` value: SizeLimit")] + fn test_account_shared_data_serialize() { + let key = Pubkey::new_unique(); + let (_account1, mut account2) = make_two_accounts(&key); + account2.serialize_data(&"hello world").unwrap(); + } + + #[test] + fn test_account_shared_data() { + let key = Pubkey::new_unique(); + let (account1, account2) = make_two_accounts(&key); + assert!(accounts_equal(&account1, &account2)); + let account = account1; + assert_eq!(account.lamports, 1); + assert_eq!(account.lamports(), 1); + assert_eq!(account.data.len(), 2); + assert_eq!(account.data().len(), 2); + assert_eq!(account.owner, key); + assert_eq!(account.owner(), &key); + assert_eq!(account.executable, true); + assert_eq!(account.executable(), true); + assert_eq!(account.rent_epoch, 4); + assert_eq!(account.rent_epoch(), 4); + let account = account2; + assert_eq!(account.lamports, 1); + assert_eq!(account.lamports(), 1); + assert_eq!(account.data.len(), 2); + assert_eq!(account.data().len(), 2); + assert_eq!(account.owner, key); + assert_eq!(account.owner(), &key); + assert_eq!(account.executable, true); + assert_eq!(account.executable(), true); + assert_eq!(account.rent_epoch, 4); + assert_eq!(account.rent_epoch(), 4); + } + + // test clone and from for both types against expected + fn test_equal( + should_be_equal: bool, + account1: &Account, + account2: &AccountSharedData, + account_expected: &Account, + ) { + assert_eq!(should_be_equal, accounts_equal(account1, account2)); + if should_be_equal { + assert!(accounts_equal(account_expected, account2)); + } + assert_eq!( + accounts_equal(account_expected, account1), + accounts_equal(account_expected, &account1.clone()) + ); + assert_eq!( + accounts_equal(account_expected, account2), + accounts_equal(account_expected, &account2.clone()) + ); + assert_eq!( + accounts_equal(account_expected, account1), + 
accounts_equal(account_expected, &AccountSharedData::from(account1.clone())) + ); + assert_eq!( + accounts_equal(account_expected, account2), + accounts_equal(account_expected, &Account::from(account2.clone())) + ); + } + + #[test] + #[allow(clippy::redundant_clone)] + fn test_account_shared_data_all_fields() { + let key = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let key3 = Pubkey::new_unique(); + let (mut account1, mut account2) = make_two_accounts(&key); + assert!(accounts_equal(&account1, &account2)); + + let mut account_expected = account1.clone(); + assert!(accounts_equal(&account1, &account_expected)); + assert!(accounts_equal(&account1, &account2.clone())); // test the clone here + + for field_index in 0..5 { + for pass in 0..4 { + if field_index == 0 { + if pass == 0 { + account1.lamports += 1; + } else if pass == 1 { + account_expected.lamports += 1; + account2.set_lamports(account2.lamports + 1); + } else if pass == 2 { + account1.set_lamports(account1.lamports + 1); + } else if pass == 3 { + account_expected.lamports += 1; + account2.lamports += 1; + } + } else if field_index == 1 { + if pass == 0 { + account1.data[0] += 1; + } else if pass == 1 { + account_expected.data[0] += 1; + account2.data_as_mut_slice()[0] = account2.data[0] + 1; + } else if pass == 2 { + account1.data_as_mut_slice()[0] = account1.data[0] + 1; + } else if pass == 3 { + account_expected.data[0] += 1; + account2.data[0] += 1; + } + } else if field_index == 2 { + if pass == 0 { + account1.owner = key2; + } else if pass == 1 { + account_expected.owner = key2; + account2.set_owner(key2); + } else if pass == 2 { + account1.set_owner(key3); + } else if pass == 3 { + account_expected.owner = key3; + account2.owner = key3; + } + } else if field_index == 3 { + if pass == 0 { + account1.executable = !account1.executable; + } else if pass == 1 { + account_expected.executable = !account_expected.executable; + account2.set_executable(!account2.executable); + } else if pass == 
2 { + account1.set_executable(!account1.executable); + } else if pass == 3 { + account_expected.executable = !account_expected.executable; + account2.executable = !account2.executable; + } + } else if field_index == 4 { + if pass == 0 { + account1.rent_epoch += 1; + } else if pass == 1 { + account_expected.rent_epoch += 1; + account2.set_rent_epoch(account2.rent_epoch + 1); + } else if pass == 2 { + account1.set_rent_epoch(account1.rent_epoch + 1); + } else if pass == 3 { + account_expected.rent_epoch += 1; + account2.rent_epoch += 1; + } + } + + let should_be_equal = pass == 1 || pass == 3; + test_equal(should_be_equal, &account1, &account2, &account_expected); + + // test new_ref + if should_be_equal { + assert!(accounts_equal( + &Account::new_ref( + account_expected.lamports(), + account_expected.data().len(), + account_expected.owner() + ) + .borrow(), + &AccountSharedData::new_ref( + account_expected.lamports(), + account_expected.data().len(), + account_expected.owner() + ) + .borrow() + )); + + { + // test new_data + let account1_with_data = Account::new_data( + account_expected.lamports(), + &account_expected.data()[0], + account_expected.owner(), + ) + .unwrap(); + let account2_with_data = AccountSharedData::new_data( + account_expected.lamports(), + &account_expected.data()[0], + account_expected.owner(), + ) + .unwrap(); + + assert!(accounts_equal(&account1_with_data, &account2_with_data)); + assert_eq!( + account1_with_data.deserialize_data::().unwrap(), + account2_with_data.deserialize_data::().unwrap() + ); + } + + // test new_data_with_space + assert!(accounts_equal( + &Account::new_data_with_space( + account_expected.lamports(), + &account_expected.data()[0], + 1, + account_expected.owner() + ) + .unwrap(), + &AccountSharedData::new_data_with_space( + account_expected.lamports(), + &account_expected.data()[0], + 1, + account_expected.owner() + ) + .unwrap() + )); + + // test new_ref_data + assert!(accounts_equal( + &Account::new_ref_data( + 
account_expected.lamports(), + &account_expected.data()[0], + account_expected.owner() + ) + .unwrap() + .borrow(), + &AccountSharedData::new_ref_data( + account_expected.lamports(), + &account_expected.data()[0], + account_expected.owner() + ) + .unwrap() + .borrow() + )); + + //new_ref_data_with_space + assert!(accounts_equal( + &Account::new_ref_data_with_space( + account_expected.lamports(), + &account_expected.data()[0], + 1, + account_expected.owner() + ) + .unwrap() + .borrow(), + &AccountSharedData::new_ref_data_with_space( + account_expected.lamports(), + &account_expected.data()[0], + 1, + account_expected.owner() + ) + .unwrap() + .borrow() + )); + } + } + } + } +} diff --git a/sdk/src/account_utils.rs b/sdk/src/account_utils.rs index c1096c1e95..a7f5f30366 100644 --- a/sdk/src/account_utils.rs +++ b/sdk/src/account_utils.rs @@ -1,6 +1,7 @@ //! useful extras for Account state -use crate::{account::Account, instruction::InstructionError}; +use crate::{account::Account, account::AccountSharedData, instruction::InstructionError}; use bincode::ErrorKind; +use std::cell::Ref; /// Convenience trait to covert bincode errors to instruction errors. 
pub trait StateMut { @@ -28,20 +29,49 @@ where } } +impl StateMut for AccountSharedData +where + T: serde::Serialize + serde::de::DeserializeOwned, +{ + fn state(&self) -> Result { + self.deserialize_data() + .map_err(|_| InstructionError::InvalidAccountData) + } + fn set_state(&mut self, state: &T) -> Result<(), InstructionError> { + self.serialize_data(state).map_err(|err| match *err { + ErrorKind::SizeLimit => InstructionError::AccountDataTooSmall, + _ => InstructionError::GenericError, + }) + } +} + +impl StateMut for Ref<'_, AccountSharedData> +where + T: serde::Serialize + serde::de::DeserializeOwned, +{ + fn state(&self) -> Result { + self.deserialize_data() + .map_err(|_| InstructionError::InvalidAccountData) + } + fn set_state(&mut self, _state: &T) -> Result<(), InstructionError> { + panic!("illegal"); + } +} + #[cfg(test)] mod tests { use super::*; - use crate::{account::Account, pubkey::Pubkey}; + use crate::{account::AccountSharedData, pubkey::Pubkey}; #[test] fn test_account_state() { let state = 42u64; - assert!(Account::default().set_state(&state).is_err()); - let res = Account::default().state() as Result; + assert!(AccountSharedData::default().set_state(&state).is_err()); + let res = AccountSharedData::default().state() as Result; assert!(res.is_err()); - let mut account = Account::new(0, std::mem::size_of::(), &Pubkey::default()); + let mut account = AccountSharedData::new(0, std::mem::size_of::(), &Pubkey::default()); assert!(account.set_state(&state).is_ok()); let stored_state: u64 = account.state().unwrap(); diff --git a/sdk/src/arithmetic.rs b/sdk/src/arithmetic.rs new file mode 100644 index 0000000000..8f0be2df46 --- /dev/null +++ b/sdk/src/arithmetic.rs @@ -0,0 +1,45 @@ +use std::time::Duration; + +/// A helper trait for primitive types that do not yet implement saturating arithmetic methods +pub trait SaturatingArithmetic { + fn sol_saturating_add(&self, rhs: Self) -> Self; + fn sol_saturating_sub(&self, rhs: Self) -> Self; + fn 
sol_saturating_mul(&self, rhs: T) -> Self; +} + +/// Saturating arithmetic for Duration, until Rust support moves from nightly to stable +/// Duration::MAX is constructed manually, as Duration consts are not yet stable either. +impl SaturatingArithmetic for Duration { + fn sol_saturating_add(&self, rhs: Self) -> Self { + self.checked_add(rhs) + .unwrap_or_else(|| Self::new(u64::MAX, 1_000_000_000u32.saturating_sub(1))) + } + fn sol_saturating_sub(&self, rhs: Self) -> Self { + self.checked_sub(rhs).unwrap_or_else(|| Self::new(0, 0)) + } + fn sol_saturating_mul(&self, rhs: u32) -> Self { + self.checked_mul(rhs) + .unwrap_or_else(|| Self::new(u64::MAX, 1_000_000_000u32.saturating_sub(1))) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + + #[test] + fn test_duration() { + let empty_duration = Duration::new(0, 0); + let max_duration = Duration::new(u64::MAX, 1_000_000_000 - 1); + let duration = Duration::new(u64::MAX, 0); + + let add = duration.sol_saturating_add(duration); + assert_eq!(add, max_duration); + + let sub = duration.sol_saturating_sub(max_duration); + assert_eq!(sub, empty_duration); + + let mult = duration.sol_saturating_mul(u32::MAX); + assert_eq!(mult, max_duration); + } +} diff --git a/sdk/src/derivation_path.rs b/sdk/src/derivation_path.rs index 34971d356f..11c5631776 100644 --- a/sdk/src/derivation_path.rs +++ b/sdk/src/derivation_path.rs @@ -1,95 +1,129 @@ -#![cfg(feature = "full")] use { - std::{fmt, str::FromStr}, + core::{iter::IntoIterator, slice::Iter}, + derivation_path::{ChildIndex, DerivationPath as DerivationPathInner}, + std::{ + convert::{Infallible, TryFrom}, + fmt, + str::FromStr, + }, thiserror::Error, + uriparse::URIReference, }; +const ACCOUNT_INDEX: usize = 2; +const CHANGE_INDEX: usize = 3; + /// Derivation path error. 
-#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq)] pub enum DerivationPathError { #[error("invalid derivation path: {0}")] InvalidDerivationPath(String), + #[error("infallible")] + Infallible, } -#[derive(Clone, Default, PartialEq)] -pub struct DerivationPathComponent(u32); +impl From for DerivationPathError { + fn from(_: Infallible) -> Self { + Self::Infallible + } +} -impl DerivationPathComponent { - pub const HARDENED_BIT: u32 = 1 << 31; +#[derive(Clone, PartialEq)] +pub struct DerivationPath(DerivationPathInner); - pub fn as_u32(&self) -> u32 { - self.0 +impl Default for DerivationPath { + fn default() -> Self { + Self::new_bip44(None, None) } } -impl From for DerivationPathComponent { - fn from(n: u32) -> Self { - Self(n | Self::HARDENED_BIT) +impl TryFrom<&str> for DerivationPath { + type Error = DerivationPathError; + fn try_from(s: &str) -> Result { + Self::from_key_str(s) } } -impl FromStr for DerivationPathComponent { - type Err = DerivationPathError; - - fn from_str(s: &str) -> Result { - let index_str = if let Some(stripped) = s.strip_suffix('\'') { - stripped - } else { - s - }; - index_str.parse::().map(|ki| ki.into()).map_err(|_| { - DerivationPathError::InvalidDerivationPath(format!( - "failed to parse path component: {:?}", - s - )) - }) +impl AsRef<[ChildIndex]> for DerivationPath { + fn as_ref(&self) -> &[ChildIndex] { + &self.0.as_ref() } } -impl std::fmt::Display for DerivationPathComponent { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - let hardened = if (self.0 & Self::HARDENED_BIT) == 0 { - "" +impl DerivationPath { + fn new>>(path: P) -> Self { + Self(DerivationPathInner::new(path)) + } + + pub fn from_key_str(path: &str) -> Result { + Self::from_key_str_with_coin(path, Solana) + } + + fn from_key_str_with_coin(path: &str, coin: T) -> Result { + let master_path = if path == "m" { + path.to_string() } else { - "'" + format!("m/{}", path) }; - let index = self.0 & !Self::HARDENED_BIT; - 
write!(fmt, "{}{}", index, hardened) + let extend = DerivationPathInner::from_str(&master_path) + .map_err(|err| DerivationPathError::InvalidDerivationPath(err.to_string()))?; + let mut extend = extend.into_iter(); + let account = extend.next().map(|index| index.to_u32()); + let change = extend.next().map(|index| index.to_u32()); + if extend.next().is_some() { + return Err(DerivationPathError::InvalidDerivationPath(format!( + "key path `{}` too deep, only / supported", + path + ))); + } + Ok(Self::new_bip44_with_coin(coin, account, change)) } -} -impl std::fmt::Debug for DerivationPathComponent { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Display::fmt(self, fmt) + fn from_absolute_path_str(path: &str) -> Result { + let inner = DerivationPath::_from_absolute_path_insecure_str(path)? + .into_iter() + .map(|c| ChildIndex::Hardened(c.to_u32())) + .collect::>(); + Ok(Self(DerivationPathInner::new(inner))) } -} -#[derive(Default, PartialEq, Clone)] -pub struct DerivationPath { - pub account: Option, - pub change: Option, -} + fn _from_absolute_path_insecure_str(path: &str) -> Result { + Ok(Self(DerivationPathInner::from_str(&path).map_err( + |err| DerivationPathError::InvalidDerivationPath(err.to_string()), + )?)) + } -impl fmt::Debug for DerivationPath { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let account = if let Some(account) = &self.account { - format!("/{:?}", account) - } else { - "".to_string() - }; - let change = if let Some(change) = &self.change { - format!("/{:?}", change) - } else { - "".to_string() - }; - write!(f, "m/44'/5655640'{}{}", account, change) + pub fn new_bip44(account: Option, change: Option) -> Self { + Self::new_bip44_with_coin(Solana, account, change) } -} -impl DerivationPath { + fn new_bip44_with_coin(coin: T, account: Option, change: Option) -> Self { + let mut indexes = coin.base_indexes(); + if let Some(account) = account { + indexes.push(ChildIndex::Hardened(account)); + if let 
Some(change) = change { + indexes.push(ChildIndex::Hardened(change)); + } + } + Self::new(indexes) + } + + pub fn account(&self) -> Option<&ChildIndex> { + self.0.path().get(ACCOUNT_INDEX) + } + + pub fn change(&self) -> Option<&ChildIndex> { + self.0.path().get(CHANGE_INDEX) + } + + pub fn path(&self) -> &[ChildIndex] { + self.0.path() + } + + // Assumes `key` query-string key pub fn get_query(&self) -> String { - if let Some(account) = &self.account { - if let Some(change) = &self.change { + if let Some(account) = &self.account() { + if let Some(change) = &self.change() { format!("?key={}/{}", account, change) } else { format!("?key={}", account) @@ -98,67 +132,633 @@ impl DerivationPath { "".to_string() } } + + pub fn from_uri_key_query(uri: &URIReference<'_>) -> Result, DerivationPathError> { + Self::from_uri(uri, true) + } + + pub fn from_uri_any_query(uri: &URIReference<'_>) -> Result, DerivationPathError> { + Self::from_uri(uri, false) + } + + fn from_uri( + uri: &URIReference<'_>, + key_only: bool, + ) -> Result, DerivationPathError> { + if let Some(query) = uri.query() { + let query_str = query.as_str(); + if query_str.is_empty() { + return Ok(None); + } + let query = qstring::QString::from(query_str); + if query.len() > 1 { + return Err(DerivationPathError::InvalidDerivationPath( + "invalid query string, extra fields not supported".to_string(), + )); + } + let key = query.get(QueryKey::Key.as_ref()); + if let Some(key) = key { + // Use from_key_str instead of TryInto here to make it more explicit that this + // generates a Solana bip44 DerivationPath + return Self::from_key_str(key).map(Some); + } + if key_only { + return Err(DerivationPathError::InvalidDerivationPath(format!( + "invalid query string `{}`, only `key` supported", + query_str, + ))); + } + let full_path = query.get(QueryKey::FullPath.as_ref()); + if let Some(full_path) = full_path { + return Self::from_absolute_path_str(full_path).map(Some); + } + 
Err(DerivationPathError::InvalidDerivationPath(format!( + "invalid query string `{}`, only `key` and `full-path` supported", + query_str, + ))) + } else { + Ok(None) + } + } +} + +impl fmt::Debug for DerivationPath { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "m")?; + for index in self.0.path() { + write!(f, "/{}", index)?; + } + Ok(()) + } +} + +impl<'a> IntoIterator for &'a DerivationPath { + type IntoIter = Iter<'a, ChildIndex>; + type Item = &'a ChildIndex; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +const QUERY_KEY_FULL_PATH: &str = "full-path"; +const QUERY_KEY_KEY: &str = "key"; + +#[derive(Clone, Debug, Error, PartialEq)] +#[error("invalid query key `{0}`")] +struct QueryKeyError(String); + +enum QueryKey { + FullPath, + Key, +} + +impl FromStr for QueryKey { + type Err = QueryKeyError; + fn from_str(s: &str) -> Result { + let lowercase = s.to_ascii_lowercase(); + match lowercase.as_str() { + QUERY_KEY_FULL_PATH => Ok(Self::FullPath), + QUERY_KEY_KEY => Ok(Self::Key), + _ => Err(QueryKeyError(s.to_string())), + } + } +} + +impl AsRef for QueryKey { + fn as_ref(&self) -> &str { + match self { + Self::FullPath => QUERY_KEY_FULL_PATH, + Self::Key => QUERY_KEY_KEY, + } + } +} + +impl std::fmt::Display for QueryKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s: &str = self.as_ref(); + write!(f, "{}", s) + } +} + +trait Bip44 { + const PURPOSE: u32 = 44; + const COIN: u32; + + fn base_indexes(&self) -> Vec { + vec![ + ChildIndex::Hardened(Self::PURPOSE), + ChildIndex::Hardened(Self::COIN), + ] + } +} + +struct Solana; + +impl Bip44 for Solana { + const COIN: u32 = 501; } #[cfg(test)] mod tests { use super::*; + use uriparse::URIReferenceBuilder; + + struct TestCoin; + impl Bip44 for TestCoin { + const COIN: u32 = 999; + } #[test] - fn test_get_query() { - let derivation_path = DerivationPath { - account: None, - change: None, - }; - assert_eq!(derivation_path.get_query(), 
"".to_string()); - let derivation_path = DerivationPath { - account: Some(1.into()), - change: None, - }; + fn test_from_key_str() { + let s = "1/2"; assert_eq!( - derivation_path.get_query(), - format!("?key={}", DerivationPathComponent::from(1)) + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)) ); - let derivation_path = DerivationPath { - account: Some(1.into()), - change: Some(2.into()), - }; + let s = "1'/2'"; + assert_eq!( + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)) + ); + let s = "1\'/2\'"; + assert_eq!( + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)) + ); + let s = "1"; assert_eq!( - derivation_path.get_query(), - format!( - "?key={}/{}", - DerivationPathComponent::from(1), - DerivationPathComponent::from(2) - ) + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), None) ); + let s = "1'"; + assert_eq!( + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), None) + ); + let s = "1\'"; + assert_eq!( + DerivationPath::from_key_str_with_coin(s, TestCoin).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), None) + ); + + assert!(DerivationPath::from_key_str_with_coin("1/2/3", TestCoin).is_err()); + assert!(DerivationPath::from_key_str_with_coin("other", TestCoin).is_err()); + assert!(DerivationPath::from_key_str_with_coin("1o", TestCoin).is_err()); } #[test] - fn test_derivation_path_debug() { - let mut path = DerivationPath::default(); - assert_eq!(format!("{:?}", path), "m/44'/5655640'".to_string()); + fn test_from_absolute_path_str() { + let s = "m/44/501"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::default() + ); + let s = 
"m/44'/501'"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::default() + ); + let s = "m/44'/501'/1/2"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new_bip44(Some(1), Some(2)) + ); + let s = "m/44'/501'/1'/2'"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new_bip44(Some(1), Some(2)) + ); - path.account = Some(1.into()); - assert_eq!(format!("{:?}", path), "m/44'/5655640'/1'".to_string()); + // Test non-Solana Bip44 + let s = "m/44'/999'/1/2"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)) + ); + let s = "m/44'/999'/1'/2'"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)) + ); - path.change = Some(2.into()); - assert_eq!(format!("{:?}", path), "m/44'/5655640'/1'/2'".to_string()); + // Test non-bip44 paths + let s = "m/501'/0'/0/0"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new(vec![ + ChildIndex::Hardened(501), + ChildIndex::Hardened(0), + ChildIndex::Hardened(0), + ChildIndex::Hardened(0), + ]) + ); + let s = "m/501'/0'/0'/0'"; + assert_eq!( + DerivationPath::from_absolute_path_str(s).unwrap(), + DerivationPath::new(vec![ + ChildIndex::Hardened(501), + ChildIndex::Hardened(0), + ChildIndex::Hardened(0), + ChildIndex::Hardened(0), + ]) + ); } #[test] - fn test_derivation_path_component() { - let f = DerivationPathComponent::from(1); - assert_eq!(f.as_u32(), 1 | DerivationPathComponent::HARDENED_BIT); + fn test_from_uri() { + let derivation_path = DerivationPath::new_bip44(Some(0), Some(0)); + + // test://path?key=0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0/0")) + .unwrap(); + let uri 
= builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, true).unwrap(), + Some(derivation_path.clone()) + ); - let fs = DerivationPathComponent::from_str("1").unwrap(); - assert_eq!(fs, f); + // test://path?key=0'/0' + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0'/0'")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, true).unwrap(), + Some(derivation_path.clone()) + ); + + // test://path?key=0\'/0\' + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0\'/0\'")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, true).unwrap(), + Some(derivation_path) + ); - let fs = DerivationPathComponent::from_str("1'").unwrap(); - assert_eq!(fs, f); + // test://path?key=m + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=m")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, true).unwrap(), + Some(DerivationPath::new_bip44(None, None)) + ); + + // test://path + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!(DerivationPath::from_uri(&uri, true).unwrap(), None); + + // test://path? 
+ let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!(DerivationPath::from_uri(&uri, true).unwrap(), None); + + // test://path?key=0/0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0/0/0")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?key=0/0&bad-key=0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0/0&bad-key=0/0")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?bad-key=0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("bad-key=0/0")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?key=bad-value + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=bad-value")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?key= + let mut builder = 
URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?key + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + } + + #[test] + fn test_from_uri_full_path() { + let derivation_path = DerivationPath::from_absolute_path_str("m/44'/999'/1'").unwrap(); + + // test://path?full-path=m/44/999/1 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m/44/999/1")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, false).unwrap(), + Some(derivation_path.clone()) + ); + + // test://path?full-path=m/44'/999'/1' + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m/44'/999'/1'")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, false).unwrap(), + Some(derivation_path.clone()) + ); + + // test://path?full-path=m/44\'/999\'/1\' + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m/44\'/999\'/1\'")) + .unwrap(); + let uri = 
builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, false).unwrap(), + Some(derivation_path) + ); + + // test://path?full-path=m + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m")) + .unwrap(); + let uri = builder.build().unwrap(); + assert_eq!( + DerivationPath::from_uri(&uri, false).unwrap(), + Some(DerivationPath(DerivationPathInner::from_str("m").unwrap())) + ); + + // test://path?full-path=m/44/999/1, only `key` supported + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m/44/999/1")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, true), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?key=0/0&full-path=m/44/999/1 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("key=0/0&full-path=m/44/999/1")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, false), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?full-path=m/44/999/1&bad-key=0/0 + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=m/44/999/1&bad-key=0/0")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, false), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?full-path=bad-value + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + 
.unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=bad-value")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, false), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?full-path= + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path=")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, false), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + + // test://path?full-path + let mut builder = URIReferenceBuilder::new(); + builder + .try_scheme(Some("test")) + .unwrap() + .try_authority(Some("path")) + .unwrap() + .try_path("") + .unwrap() + .try_query(Some("full-path")) + .unwrap(); + let uri = builder.build().unwrap(); + assert!(matches!( + DerivationPath::from_uri(&uri, false), + Err(DerivationPathError::InvalidDerivationPath(_)) + )); + } + + #[test] + fn test_get_query() { + let derivation_path = DerivationPath::new_bip44_with_coin(TestCoin, None, None); + assert_eq!(derivation_path.get_query(), "".to_string()); + let derivation_path = DerivationPath::new_bip44_with_coin(TestCoin, Some(1), None); + assert_eq!(derivation_path.get_query(), "?key=1'".to_string()); + let derivation_path = DerivationPath::new_bip44_with_coin(TestCoin, Some(1), Some(2)); + assert_eq!(derivation_path.get_query(), "?key=1'/2'".to_string()); + } + + #[test] + fn test_derivation_path_debug() { + let path = DerivationPath::default(); + assert_eq!(format!("{:?}", path), "m/44'/501'".to_string()); - assert!(DerivationPathComponent::from_str("-1").is_err()); + let path = DerivationPath::new_bip44(Some(1), None); + assert_eq!(format!("{:?}", path), "m/44'/501'/1'".to_string()); - assert_eq!(format!("{}", f), "1'".to_string()); - 
assert_eq!(format!("{:?}", f), "1'".to_string()); + let path = DerivationPath::new_bip44(Some(1), Some(2)); + assert_eq!(format!("{:?}", path), "m/44'/501'/1'/2'".to_string()); } } diff --git a/sdk/src/feature.rs b/sdk/src/feature.rs index 61a1fdaa93..78517f5d6b 100644 --- a/sdk/src/feature.rs +++ b/sdk/src/feature.rs @@ -1,21 +1,21 @@ -use crate::account::Account; +use crate::account::{AccountSharedData, ReadableAccount, WritableAccount}; pub use solana_program::feature::*; -pub fn from_account(account: &Account) -> Option { - if account.owner != id() { +pub fn from_account(account: &T) -> Option { + if account.owner() != &id() { None } else { - bincode::deserialize(&account.data).ok() + bincode::deserialize(account.data()).ok() } } -pub fn to_account(feature: &Feature, account: &mut Account) -> Option<()> { - bincode::serialize_into(&mut account.data[..], feature).ok() +pub fn to_account(feature: &Feature, account: &mut AccountSharedData) -> Option<()> { + bincode::serialize_into(account.data_as_mut_slice(), feature).ok() } -pub fn create_account(feature: &Feature, lamports: u64) -> Account { +pub fn create_account(feature: &Feature, lamports: u64) -> AccountSharedData { let data_len = Feature::size_of().max(bincode::serialized_size(feature).unwrap() as usize); - let mut account = Account::new(lamports, data_len, &id()); + let mut account = AccountSharedData::new(lamports, data_len, &id()); to_account(feature, &mut account).unwrap(); account } @@ -26,7 +26,7 @@ mod test { #[test] fn feature_deserialize_none() { - let just_initialized = Account::new(42, Feature::size_of(), &id()); + let just_initialized = AccountSharedData::new(42, Feature::size_of(), &id()); assert_eq!( from_account(&just_initialized), Some(Feature { activated_at: None }) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 040f371225..8369b9ba4b 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -47,46 +47,10 @@ pub mod spl_token_v2_multisig_fix { 
solana_sdk::declare_id!("E5JiFDQCwyC6QfT9REFyMpfK2mHcmv1GUDySU1Ue7TYv"); } -pub mod bpf_loader2_program { - solana_sdk::declare_id!("DFBnrgThdzH4W6wZ12uGPoWcMnvfZj11EHnxHcVxLPhD"); -} - -pub mod bpf_compute_budget_balancing { - solana_sdk::declare_id!("HxvjqDSiF5sYdSYuCXsUnS8UeAoWsMT9iGoFP8pgV1mB"); -} - -pub mod sha256_syscall_enabled { - solana_sdk::declare_id!("D7KfP7bZxpkYtD4Pc38t9htgs1k5k47Yhxe4rp6WDVi8"); -} - pub mod no_overflow_rent_distribution { solana_sdk::declare_id!("4kpdyrcj5jS47CZb2oJGfVxjYbsMm2Kx97gFyZrxxwXz"); } -pub mod ristretto_mul_syscall_enabled { - solana_sdk::declare_id!("HRe7A6aoxgjKzdjbBv6HTy7tJ4YWqE6tVmYCGho6S9Aq"); -} - -pub mod max_invoke_depth_4 { - solana_sdk::declare_id!("EdM9xggY5y7AhNMskRG8NgGMnaP4JFNsWi8ZZtyT1af5"); -} - -pub mod max_program_call_depth_64 { - solana_sdk::declare_id!("YCKSgA6XmjtkQrHBQjpyNrX6EMhJPcYcLWMVgWn36iv"); -} - -pub mod sol_log_compute_units_syscall { - solana_sdk::declare_id!("BHuZqHAj7JdZc68wVgZZcy51jZykvgrx4zptR44RyChe"); -} - -pub mod pubkey_log_syscall_enabled { - solana_sdk::declare_id!("MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN"); -} - -pub mod pull_request_ping_pong_check { - solana_sdk::declare_id!("5RzEHTnf6D7JPZCvwEzjM19kzBsyjSU3HoMfXaQmVgnZ"); -} - pub mod stake_program_v2 { solana_sdk::declare_id!("Gvd9gGJZDHGMNf1b3jkxrfBQSR5etrfTQSBNKCvLSFJN"); } @@ -103,76 +67,87 @@ pub mod bpf_loader_upgradeable_program { solana_sdk::declare_id!("FbhK8HN9qvNHvJcoFVHAEUCNkagHvu7DTWzdnLuVQ5u4"); } -pub mod try_find_program_address_syscall_enabled { - solana_sdk::declare_id!("EMsMNadQNhCYDyGpYH5Tx6dGHxiUqKHk782PU5XaWfmi"); -} - pub mod stake_program_v3 { solana_sdk::declare_id!("Ego6nTu7WsBcZBvVqJQKp6Yku2N3mrfG8oYCfaLZkAeK"); } -pub mod max_cpi_instruction_size_ipv6_mtu { - solana_sdk::declare_id!("5WLtuUJA5VVA1Cc28qULPfGs8anhoBev8uNqaaXeasnf"); +pub mod require_custodian_for_locked_stake_authorize { + solana_sdk::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); } -pub mod limit_cpi_loader_invoke 
{ - solana_sdk::declare_id!("xGbcW7EEC7zMRJ6LaJCob65EJxKryWjwM4rv8f57SRM"); +pub mod spl_token_v2_self_transfer_fix { + solana_sdk::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); } -pub mod use_loaded_program_accounts { - solana_sdk::declare_id!("FLjgLeg1PJkZimQCVa5sVFtaq6VmSDPw3NvH8iQ3nyHn"); +pub mod warp_timestamp_again { + solana_sdk::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); } -pub mod abort_on_all_cpi_failures { - solana_sdk::declare_id!("ED5D5a2hQaECHaMmKpnU48GdsfafdCjkb3pgAw5RKbb2"); +pub mod check_init_vote_data { + solana_sdk::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); } -pub mod use_loaded_executables { - solana_sdk::declare_id!("3Jq7mE2chDpf6oeEDsuGK7orTYEgyQjCPvaRppTNdVGK"); +pub mod check_program_owner { + solana_sdk::declare_id!("5XnbR5Es9YXEARRuP6mdvoxiW3hx5atNNeBmwVd8P3QD"); } -pub mod turbine_retransmit_peers_patch { - solana_sdk::declare_id!("5Lu3JnWSFwRYpXzwDMkanWSk6XqSuF2i5fpnVhzB5CTc"); +pub mod cpi_share_ro_and_exec_accounts { + solana_sdk::declare_id!("6VgVBi3uRVqp56TtEwNou8idgdmhCD1aYqX8FaJ1fnJb"); } -pub mod prevent_upgrade_and_invoke { - solana_sdk::declare_id!("BiNjYd8jCYDgAwMqP91uwZs6skWpuHtKrZbckuKESs8N"); +pub mod skip_ro_deserialization { + solana_sdk::declare_id!("6Sw5JV84f7QkDe8gvRxpcPWFnPpfpgEnNziiy8sELaCp"); } -pub mod track_writable_deescalation { - solana_sdk::declare_id!("HVPSxqskEtRLRT2ZeEMmkmt9FWqoFX4vrN6f5VaadLED"); +pub mod require_stake_for_gossip { + solana_sdk::declare_id!("6oNzd5Z3M2L1xo4Q5hoox7CR2DuW7m1ETLWH5jHJthwa"); } -pub mod require_custodian_for_locked_stake_authorize { - solana_sdk::declare_id!("D4jsDcXaqdW8tDAWn8H4R25Cdns2YwLneujSL1zvjW6R"); +pub mod cpi_data_cost { + solana_sdk::declare_id!("Hrg5bXePPGiAVWZfDHbvjqytSeyBDPAGAQ7v6N5i4gCX"); } -pub mod spl_token_v2_self_transfer_fix { - solana_sdk::declare_id!("BL99GYhdjjcv6ys22C9wPgn2aTVERDbPHHo4NbS3hgp7"); +pub mod upgradeable_close_instruction { + 
solana_sdk::declare_id!("FsPaByos3gA9bUEhp3EimQpQPCoSvCEigHod496NmABQ"); } -pub mod matching_buffer_upgrade_authorities { - solana_sdk::declare_id!("B5PSjDEJvKJEUQSL7q94N7XCEoWJCYum8XfUg7yuugUU"); +pub mod demote_sysvar_write_locks { + solana_sdk::declare_id!("86LJYRuq2zgtHuL3FccR6hqFJQMQkFoun4knAxcPiF1P"); } -pub mod warp_timestamp_again { - solana_sdk::declare_id!("GvDsGDkH5gyzwpDhxNixx8vtx1kwYHH13RiNAPw27zXb"); +pub mod sysvar_via_syscall { + solana_sdk::declare_id!("7411E6gFQLDhQkdRjmpXwM1hzHMMoYQUjHicmvGPC1Nf"); } -pub mod per_byte_logging_cost { - solana_sdk::declare_id!("59dM4SV6dPEKXPfkrkhFkRdn4K6xwKxdNAPMyXG7J1wT"); +pub mod check_duplicates_by_hash { + solana_sdk::declare_id!("8ZqTSYHgzyaYCcXJPMViRy6afCFSgNvYooPDeVdyj5GC"); } -pub mod check_init_vote_data { - solana_sdk::declare_id!("3ccR6QpxGYsAbWyfevEtBNGfWV4xBffxRj2tD6A9i39F"); +pub mod enforce_aligned_host_addrs { + solana_sdk::declare_id!("6Qob9Z4RwGdf599FDVCqsjuKjR8ZFR3oVs2ByRLWBsua"); +} +pub mod set_upgrade_authority_via_cpi_enabled { + solana_sdk::declare_id!("GQdjCCptpGECG7QfE35hKTAopB1umGoSrdKfax2VmZWy"); } -pub mod check_program_owner { - solana_sdk::declare_id!("5XnbR5Es9YXEARRuP6mdvoxiW3hx5atNNeBmwVd8P3QD"); +pub mod update_data_on_realloc { + solana_sdk::declare_id!("BkPcYCrwHXBoTsv9vMhiRF9gteZmDj3Uwisz9CDjoMKp"); +} + +pub mod keccak256_syscall_enabled { + solana_sdk::declare_id!("7Ua8mFtahVfA3WCY9LoXDAJJdvJRJHckvSSr1dD8FTWc"); +} + +pub mod stake_program_v4 { + solana_sdk::declare_id!("Dc7djyhP9aLfdq2zktpvskeAjpG56msCU1yexpxXiWZb"); +} + +pub mod system_transfer_zero_check { + solana_sdk::declare_id!("BrTR9hzw4WBGFP65AJMbpAo64DcA3U6jdPSga9fMV5cS"); } -pub mod test_features { - solana_sdk::declare_id!("11111111111111111111111111111111"); +pub mod track_writable_deescalation { + solana_sdk::declare_id!("HVPSxqskEtRLRT2ZeEMmkmt9FWqoFX4vrN6f5VaadLED"); } pub mod velas_hardfork_pack { @@ -182,20 +157,9 @@ pub mod velas_hardfork_pack { // 4. sha3uncle hash from zero block, not zeros. 
solana_sdk::declare_id!("91nakVjUc5UmNzLioE6K7HhASmb2m1E7hRuLZS4LzUPV"); } -pub mod cpi_data_cost { - solana_sdk::declare_id!("Hrg5bXePPGiAVWZfDHbvjqytSeyBDPAGAQ7v6N5i4gCX"); -} - -pub mod upgradeable_close_instruction { - solana_sdk::declare_id!("FsPaByos3gA9bUEhp3EimQpQPCoSvCEigHod496NmABQ"); -} - -pub mod check_duplicates_by_hash { - solana_sdk::declare_id!("8ZqTSYHgzyaYCcXJPMViRy6afCFSgNvYooPDeVdyj5GC"); -} lazy_static! { - + /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES_BEFORE_MAINNET: HashMap = [ (instructions_sysvar_enabled::id(), "instructions sysvar"), (secp256k1_program_enabled::id(), "secp256k1 program"), @@ -204,60 +168,43 @@ lazy_static! { (pico_inflation::id(), "pico inflation"), (full_inflation::devnet_and_testnet_velas_mainnet::id(), "full inflation on devnet and testnet"), (spl_token_v2_multisig_fix::id(), "spl-token multisig fix"), - (bpf_loader2_program::id(), "bpf_loader2 program"), - (bpf_compute_budget_balancing::id(), "compute budget balancing"), - (sha256_syscall_enabled::id(), "sha256 syscall"), (no_overflow_rent_distribution::id(), "no overflow rent distribution"), - (ristretto_mul_syscall_enabled::id(), "ristretto multiply syscall"), - (max_invoke_depth_4::id(), "max invoke call depth 4"), - (max_program_call_depth_64::id(), "max program call depth 64"), - (sol_log_compute_units_syscall::id(), "sol_log_compute_units syscall (#13243)"), - (pubkey_log_syscall_enabled::id(), "pubkey log syscall"), - (pull_request_ping_pong_check::id(), "ping-pong packet check #12794"), (stake_program_v2::id(), "solana_stake_program v2"), (rewrite_stake::id(), "rewrite stake"), (filter_stake_delegation_accounts::id(), "filter stake_delegation_accounts #14062"), (bpf_loader_upgradeable_program::id(), "upgradeable bpf loader"), - (try_find_program_address_syscall_enabled::id(), "add try_find_program_address syscall"), (stake_program_v3::id(), "solana_stake_program v3"), - (max_cpi_instruction_size_ipv6_mtu::id(), "max 
cross-program invocation size 1280"), - (limit_cpi_loader_invoke::id(), "loader not authorized via CPI"), - (use_loaded_program_accounts::id(), "use loaded program accounts"), - (abort_on_all_cpi_failures::id(), "abort on all CPI failures"), - (use_loaded_executables::id(), "use loaded executable accounts"), - (turbine_retransmit_peers_patch::id(), "turbine retransmit peers patch #14631"), - (prevent_upgrade_and_invoke::id(), "prevent upgrade and invoke in same tx batch"), - (track_writable_deescalation::id(), "track account writable deescalation"), (require_custodian_for_locked_stake_authorize::id(), "require custodian to authorize withdrawer change for locked stake"), (spl_token_v2_self_transfer_fix::id(), "spl-token self-transfer fix"), - (matching_buffer_upgrade_authorities::id(), "Upgradeable buffer and program authorities must match"), + (full_inflation::mainnet::certusone::enable::id(), "full inflation enabled by Certus One"), + (full_inflation::mainnet::certusone::vote::id(), "community vote allowing Certus One to enable full inflation"), (warp_timestamp_again::id(), "warp timestamp again, adjust bounding to 25% fast 80% slow #15204"), - (per_byte_logging_cost::id(), "charge the compute budget per byte for logging"), (check_init_vote_data::id(), "check initialized Vote data"), (check_program_owner::id(), "limit programs to operating on accounts owned by itself"), - + (require_stake_for_gossip::id(), "require stakes for propagating crds values through gossip #15561"), + (cpi_data_cost::id(), "charge the compute budget for data passed via CPI"), + (upgradeable_close_instruction::id(), "close upgradeable buffer accounts"), + (demote_sysvar_write_locks::id(), "demote builtins and sysvar write locks to readonly #15497"), + (sysvar_via_syscall::id(), "provide sysvars via syscalls"), + (check_duplicates_by_hash::id(), "use transaction message hash for duplicate check"), + (enforce_aligned_host_addrs::id(), "enforce aligned host addresses"), + 
(update_data_on_realloc::id(), "Retain updated data values modified after realloc via CPI"), + (set_upgrade_authority_via_cpi_enabled::id(), "set upgrade authority instruction via cpi calls for upgradable programs"), + (keccak256_syscall_enabled::id(), "keccak256 syscall"), + (stake_program_v4::id(), "solana_stake_program v4"), + (system_transfer_zero_check::id(), "perform all checks for transfers of 0 lamports"), /*************** ADD NEW FEATURES HERE ***************/ ] - .iter() - .cloned() - .collect(); + .iter() + .copied() + .collect(); - /// Map of feature identifiers to user-visible description - pub static ref FEATURE_NAMES: HashMap = FEATURE_NAMES_BEFORE_MAINNET.iter().map(|(k,v)| (*k, *v)) - .chain( + pub static ref FEATURE_NAMES: HashMap = FEATURE_NAMES_BEFORE_MAINNET.iter().map(|(k, v)| (*k, *v)).chain( [ - (test_features::id(), "Test feature used as example how to implement features."), - (velas_hardfork_pack::id(), "EVMblockhashes sysvar history, roothashes calculation. Apply old (reconfigure_native_token, unlock_switch_vote)."), - (cpi_data_cost::id(), "charge the compute budget for data passed via CPI"), - (upgradeable_close_instruction::id(), "close upgradeable buffer accounts"), - - (check_duplicates_by_hash::id(), "use transaction message hash for duplicate check"), - /*************** ADD NEW FEATURES HERE ***************/ + (velas_hardfork_pack::id(), + "EVMblockhashes sysvar history, roothashes calculation. Apply old (reconfigure_native_token, unlock_switch_vote)."), ] - .iter() - .cloned()) - .collect(); - + ).collect(); /// Unique identifier of the current software's feature set @@ -286,9 +233,9 @@ lazy_static! 
{ enable_id: full_inflation::mainnet::certusone::enable::id(), }, ] - .iter() - .cloned() - .collect(); + .iter() + .cloned() + .collect(); } /// `FeatureSet` holds the set of currently active/inactive runtime features @@ -348,7 +295,7 @@ mod test { use super::*; #[test] - fn test_full_inflation_features_enabled_devnet_and_testnet_velas_mainnet() { + fn test_full_inflation_features_enabled_devnet_and_testnet() { let mut feature_set = FeatureSet::default(); assert!(feature_set.full_inflation_features_enabled().is_empty()); feature_set diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index 3af2ab1d2f..60c3591a3e 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -4,6 +4,7 @@ use crate::{ account::Account, + account::AccountSharedData, clock::{UnixTimestamp, DEFAULT_TICKS_PER_SLOT}, epoch_schedule::EpochSchedule, fee_calculator::FeeRateGovernor, @@ -113,7 +114,7 @@ pub fn create_genesis_config(lamports: u64) -> (GenesisConfig, Keypair) { GenesisConfig::new( &[( faucet_keypair.pubkey(), - Account::new(lamports, 0, &system_program::id()), + AccountSharedData::new(lamports, 0, &system_program::id()), )], &[], ), @@ -148,13 +149,14 @@ impl Default for GenesisConfig { impl GenesisConfig { pub fn new( - accounts: &[(Pubkey, Account)], + accounts: &[(Pubkey, AccountSharedData)], native_instruction_processors: &[(String, Pubkey)], ) -> Self { Self { accounts: accounts .iter() .cloned() + .map(|(key, account)| (key, Account::from(account))) .collect::>(), native_instruction_processors: native_instruction_processors.to_vec(), ..GenesisConfig::default() @@ -261,7 +263,9 @@ impl GenesisConfig { .make_backup() .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}.", e)))?; std::fs::create_dir_all(ledger_path)?; - evm_genesis::copy_dir(tmp_backup, evm_backup).unwrap(); // use copy instead of move, to work with cross device-links (backup makes hardlink and is immovable between devices). 
+ + // use copy instead of move, to work with cross device-links (backup makes hardlink and is immovable between devices). + evm_genesis::copy_dir(tmp_backup, evm_backup).unwrap(); Ok(()) } @@ -269,8 +273,8 @@ impl GenesisConfig { self.evm_root_hash = root_hash; } - pub fn add_account(&mut self, pubkey: Pubkey, account: Account) { - self.accounts.insert(pubkey, account); + pub fn add_account(&mut self, pubkey: Pubkey, account: AccountSharedData) { + self.accounts.insert(pubkey, Account::from(account)); } pub fn add_native_instruction_processor(&mut self, name: String, program_id: Pubkey) { @@ -286,7 +290,10 @@ impl GenesisConfig { } pub fn ns_per_slot(&self) -> u128 { - self.poh_config.target_tick_duration.as_nanos() * self.ticks_per_slot() as u128 + self.poh_config + .target_tick_duration + .as_nanos() + .saturating_mul(self.ticks_per_slot() as u128) } pub fn slots_per_year(&self) -> f64 { @@ -309,8 +316,10 @@ impl fmt::Display for GenesisConfig { Shred version: {}\n\ Ticks per slot: {:?}\n\ Hashes per tick: {:?}\n\ + Target tick duration: {:?}\n\ Slots per epoch: {}\n\ Warmup epochs: {}abled\n\ + Slots per year: {}\n\ {:?}\n\ {:?}\n\ {:?}\n\ @@ -325,12 +334,14 @@ impl fmt::Display for GenesisConfig { compute_shred_version(&self.hash(), None), self.ticks_per_slot, self.poh_config.hashes_per_tick, + self.poh_config.target_tick_duration, self.epoch_schedule.slots_per_epoch, if self.epoch_schedule.warmup { "en" } else { "dis" }, + self.slots_per_year(), self.inflation, self.rent, self.fee_rate_governor, @@ -672,11 +683,11 @@ mod tests { let mut config = GenesisConfig::default(); config.add_account( faucet_keypair.pubkey(), - Account::new(10_000, 0, &Pubkey::default()), + AccountSharedData::new(10_000, 0, &Pubkey::default()), ); config.add_account( solana_sdk::pubkey::new_rand(), - Account::new(1, 0, &Pubkey::default()), + AccountSharedData::new(1, 0, &Pubkey::default()), ); config.add_native_instruction_processor("hi".to_string(), solana_sdk::pubkey::new_rand()); @@ 
-709,11 +720,11 @@ mod tests { let mut config = GenesisConfig::default(); config.add_account( faucet_keypair.pubkey(), - Account::new(10_000, 0, &Pubkey::default()), + AccountSharedData::new(10_000, 0, &Pubkey::default()), ); config.add_account( solana_sdk::pubkey::new_rand(), - Account::new(1, 0, &Pubkey::default()), + AccountSharedData::new(1, 0, &Pubkey::default()), ); config.add_native_instruction_processor("hi".to_string(), solana_sdk::pubkey::new_rand()); config.evm_chain_id = 0x42; diff --git a/sdk/src/hard_forks.rs b/sdk/src/hard_forks.rs index 5781b60a26..1714fef458 100644 --- a/sdk/src/hard_forks.rs +++ b/sdk/src/hard_forks.rs @@ -18,7 +18,7 @@ impl HardForks { .iter() .position(|(slot, _)| *slot == new_slot) { - self.hard_forks[i] = (new_slot, self.hard_forks[i].1 + 1); + self.hard_forks[i] = (new_slot, self.hard_forks[i].1.saturating_add(1)); } else { self.hard_forks.push((new_slot, 1)); } diff --git a/sdk/src/keyed_account.rs b/sdk/src/keyed_account.rs index 3ad013a219..3f60387a3b 100644 --- a/sdk/src/keyed_account.rs +++ b/sdk/src/keyed_account.rs @@ -1,11 +1,12 @@ use crate::{ - account::{from_account, Account}, + account::{from_account, AccountSharedData, ReadableAccount}, account_utils::{State, StateMut}, }; use solana_program::{clock::Epoch, instruction::InstructionError, pubkey::Pubkey, sysvar::Sysvar}; use std::{ cell::{Ref, RefCell, RefMut}, iter::FromIterator, + rc::Rc, }; #[repr(C)] @@ -14,7 +15,7 @@ pub struct KeyedAccount<'a> { is_signer: bool, // Transaction was signed by this account's key is_writable: bool, key: &'a Pubkey, - pub account: &'a RefCell, + pub account: &'a RefCell, } impl<'a> KeyedAccount<'a> { @@ -39,11 +40,11 @@ impl<'a> KeyedAccount<'a> { } pub fn data_len(&self) -> Result { - Ok(self.try_borrow()?.data.len()) + Ok(self.try_borrow()?.data().len()) } pub fn data_is_empty(&self) -> Result { - Ok(self.try_borrow()?.data.is_empty()) + Ok(self.try_borrow()?.data().is_empty()) } pub fn owner(&self) -> Result { @@ -58,26 +59,26 
@@ impl<'a> KeyedAccount<'a> { Ok(self.try_borrow()?.rent_epoch) } - pub fn try_account_ref(&'a self) -> Result, InstructionError> { + pub fn try_account_ref(&'a self) -> Result, InstructionError> { self.try_borrow() } - pub fn try_account_ref_mut(&'a self) -> Result, InstructionError> { + pub fn try_account_ref_mut(&'a self) -> Result, InstructionError> { self.try_borrow_mut() } - fn try_borrow(&self) -> Result, InstructionError> { + fn try_borrow(&self) -> Result, InstructionError> { self.account .try_borrow() .map_err(|_| InstructionError::AccountBorrowFailed) } - fn try_borrow_mut(&self) -> Result, InstructionError> { + fn try_borrow_mut(&self) -> Result, InstructionError> { self.account .try_borrow_mut() .map_err(|_| InstructionError::AccountBorrowFailed) } - pub fn new(key: &'a Pubkey, is_signer: bool, account: &'a RefCell) -> Self { + pub fn new(key: &'a Pubkey, is_signer: bool, account: &'a RefCell) -> Self { Self { is_signer, is_writable: true, @@ -86,7 +87,11 @@ impl<'a> KeyedAccount<'a> { } } - pub fn new_readonly(key: &'a Pubkey, is_signer: bool, account: &'a RefCell) -> Self { + pub fn new_readonly( + key: &'a Pubkey, + is_signer: bool, + account: &'a RefCell, + ) -> Self { Self { is_signer, is_writable: false, @@ -102,8 +107,8 @@ impl<'a> PartialEq for KeyedAccount<'a> { } } -impl<'a> From<(&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { - fn from((key, account): (&'a Pubkey, &'a RefCell)) -> Self { +impl<'a> From<(&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { + fn from((key, account): (&'a Pubkey, &'a RefCell)) -> Self { Self { is_signer: false, is_writable: true, @@ -113,8 +118,8 @@ impl<'a> From<(&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { } } -impl<'a> From<(&'a Pubkey, bool, &'a RefCell)> for KeyedAccount<'a> { - fn from((key, is_signer, account): (&'a Pubkey, bool, &'a RefCell)) -> Self { +impl<'a> From<(&'a Pubkey, bool, &'a RefCell)> for KeyedAccount<'a> { + fn from((key, is_signer, account): (&'a Pubkey, bool, &'a RefCell)) -> Self { 
Self { is_signer, is_writable: true, @@ -124,8 +129,8 @@ impl<'a> From<(&'a Pubkey, bool, &'a RefCell)> for KeyedAccount<'a> { } } -impl<'a> From<&'a (&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { - fn from((key, account): &'a (&'a Pubkey, &'a RefCell)) -> Self { +impl<'a> From<&'a (&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { + fn from((key, account): &'a (&'a Pubkey, &'a RefCell)) -> Self { Self { is_signer: false, is_writable: true, @@ -136,13 +141,13 @@ impl<'a> From<&'a (&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { } pub fn create_keyed_accounts<'a>( - accounts: &'a [(&'a Pubkey, &'a RefCell)], + accounts: &'a [(&'a Pubkey, &'a RefCell)], ) -> Vec> { accounts.iter().map(Into::into).collect() } pub fn create_keyed_is_signer_accounts<'a>( - accounts: &'a [(&'a Pubkey, bool, &'a RefCell)], + accounts: &'a [(&'a Pubkey, bool, &'a RefCell)], ) -> Vec> { accounts .iter() @@ -156,7 +161,7 @@ pub fn create_keyed_is_signer_accounts<'a>( } pub fn create_keyed_readonly_accounts( - accounts: &[(Pubkey, RefCell)], + accounts: &[(Pubkey, Rc>)], ) -> Vec { accounts .iter() @@ -212,7 +217,8 @@ pub fn from_keyed_account( if !S::check_id(keyed_account.unsigned_key()) { return Err(InstructionError::InvalidArgument); } - from_account::(&*keyed_account.try_account_ref()?).ok_or(InstructionError::InvalidArgument) + from_account::(&*keyed_account.try_account_ref()?) 
+ .ok_or(InstructionError::InvalidArgument) } #[cfg(test)] @@ -244,12 +250,12 @@ mod tests { let wrong_key = Pubkey::new_unique(); let account = create_account_for_test(&test_sysvar); - let test_sysvar = from_account::(&account).unwrap(); + let test_sysvar = from_account::(&account).unwrap(); assert_eq!(test_sysvar, TestSysvar::default()); - let mut account = Account::new(42, TestSysvar::size_of(), &key); + let mut account = AccountSharedData::new(42, TestSysvar::size_of(), &key); to_account(&test_sysvar, &mut account).unwrap(); - let test_sysvar = from_account::(&account).unwrap(); + let test_sysvar = from_account::(&account).unwrap(); assert_eq!(test_sysvar, TestSysvar::default()); let account = RefCell::new(account); diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index f1ae1dd78c..b8ed36b916 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -6,10 +6,13 @@ // Allows macro expansion of `use ::solana_sdk::*` to work within this crate extern crate self as solana_sdk; +#[cfg(feature = "full")] +pub use signer::signers; pub use solana_program::*; pub mod account; pub mod account_utils; +pub mod arithmetic; pub mod builtins; pub mod client; pub mod commitment_config; @@ -42,7 +45,7 @@ pub mod rpc_port; pub mod secp256k1_instruction; pub mod shred_version; pub mod signature; -pub mod signers; +pub mod signer; pub mod stake_weighted_timestamp; pub mod system_transaction; pub mod timing; diff --git a/sdk/src/log.rs b/sdk/src/log.rs index 2e8bc14190..4cc45cf413 100644 --- a/sdk/src/log.rs +++ b/sdk/src/log.rs @@ -3,7 +3,10 @@ pub use solana_program::log::*; #[macro_export] -#[deprecated(since = "1.4.3", note = "solana_program::log::info instead")] +#[deprecated( + since = "1.4.3", + note = "Please use `solana_program::log::info` instead" +)] macro_rules! 
info { ($msg:expr) => { $crate::log::sol_log($msg) diff --git a/sdk/src/native_loader.rs b/sdk/src/native_loader.rs index 0bdbf5b3d4..52cdf8fc47 100644 --- a/sdk/src/native_loader.rs +++ b/sdk/src/native_loader.rs @@ -1,4 +1,6 @@ -use crate::account::{Account, InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS}; +use crate::account::{ + AccountSharedData, InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, +}; use crate::clock::INITIAL_RENT_EPOCH; crate::declare_id!("NativeLoader1111111111111111111111111111111"); @@ -8,15 +10,15 @@ crate::declare_id!("NativeLoader1111111111111111111111111111111"); since = "1.5.17", note = "Please use `create_loadable_account_for_test` instead" )] -pub fn create_loadable_account(name: &str, lamports: u64) -> Account { +pub fn create_loadable_account(name: &str, lamports: u64) -> AccountSharedData { create_loadable_account_with_fields(name, (lamports, INITIAL_RENT_EPOCH)) } pub fn create_loadable_account_with_fields( name: &str, (lamports, rent_epoch): InheritableAccountFields, -) -> Account { - Account { +) -> AccountSharedData { + AccountSharedData { lamports, owner: id(), data: name.as_bytes().to_vec(), @@ -25,6 +27,6 @@ pub fn create_loadable_account_with_fields( } } -pub fn create_loadable_account_for_test(name: &str) -> Account { +pub fn create_loadable_account_for_test(name: &str) -> AccountSharedData { create_loadable_account_with_fields(name, DUMMY_INHERITABLE_ACCOUNT_FIELDS) } diff --git a/sdk/src/nonce_account.rs b/sdk/src/nonce_account.rs index d510e18190..7bf9624d56 100644 --- a/sdk/src/nonce_account.rs +++ b/sdk/src/nonce_account.rs @@ -1,5 +1,5 @@ use crate::{ - account::Account, + account::AccountSharedData, account_utils::StateMut, fee_calculator::FeeCalculator, hash::Hash, @@ -7,9 +7,9 @@ use crate::{ }; use std::cell::RefCell; -pub fn create_account(lamports: u64) -> RefCell { +pub fn create_account(lamports: u64) -> RefCell { RefCell::new( - Account::new_data_with_space( + 
AccountSharedData::new_data_with_space( lamports, &Versions::new_current(State::Uninitialized), State::size(), @@ -19,7 +19,7 @@ pub fn create_account(lamports: u64) -> RefCell { ) } -pub fn verify_nonce_account(acc: &Account, hash: &Hash) -> bool { +pub fn verify_nonce_account(acc: &AccountSharedData, hash: &Hash) -> bool { if acc.owner != crate::system_program::id() { return false; } @@ -29,7 +29,7 @@ pub fn verify_nonce_account(acc: &Account, hash: &Hash) -> bool { } } -pub fn fee_calculator_of(account: &Account) -> Option { +pub fn fee_calculator_of(account: &AccountSharedData) -> Option { let state = StateMut::::state(account) .ok()? .convert_to_current(); @@ -48,7 +48,7 @@ mod tests { fn test_verify_bad_account_owner_fails() { let program_id = Pubkey::new_unique(); assert_ne!(program_id, crate::system_program::id()); - let account = Account::new_data_with_space( + let account = AccountSharedData::new_data_with_space( 42, &Versions::new_current(State::Uninitialized), State::size(), diff --git a/sdk/src/nonce_keyed_account.rs b/sdk/src/nonce_keyed_account.rs index 81e3ab7c0d..fc9ae75176 100644 --- a/sdk/src/nonce_keyed_account.rs +++ b/sdk/src/nonce_keyed_account.rs @@ -153,8 +153,14 @@ impl<'a> NonceKeyedAccount for KeyedAccount<'a> { return Err(InstructionError::MissingRequiredSignature); } - self.try_account_ref_mut()?.lamports -= lamports; - to.try_account_ref_mut()?.lamports += lamports; + let nonce_balance = self.try_account_ref_mut()?.lamports; + self.try_account_ref_mut()?.lamports = nonce_balance + .checked_sub(lamports) + .ok_or(InstructionError::ArithmeticOverflow)?; + let to_balance = to.try_account_ref_mut()?.lamports; + to.try_account_ref_mut()?.lamports = to_balance + .checked_add(lamports) + .ok_or(InstructionError::ArithmeticOverflow)?; Ok(()) } diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index d09601b7f8..a8f29b60a4 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -73,6 +73,7 @@ impl fmt::Debug for Packet { } impl Default for 
Packet { + #[allow(clippy::uninit_assumed_init)] fn default() -> Packet { Packet { data: unsafe { std::mem::MaybeUninit::uninit().assume_init() }, diff --git a/sdk/src/poh_config.rs b/sdk/src/poh_config.rs index 5c14e6d6de..a393365750 100644 --- a/sdk/src/poh_config.rs +++ b/sdk/src/poh_config.rs @@ -1,4 +1,4 @@ -use crate::clock::DEFAULT_TICKS_PER_SECOND; +use crate::{clock::DEFAULT_TICKS_PER_SECOND, unchecked_div_by_const}; use std::time::Duration; #[derive(Serialize, Deserialize, Clone, Debug, AbiExample)] @@ -28,8 +28,9 @@ impl PohConfig { impl Default for PohConfig { fn default() -> Self { - Self::new_sleep(Duration::from_micros( - 1000 * 1000 / DEFAULT_TICKS_PER_SECOND, - )) + Self::new_sleep(Duration::from_micros(unchecked_div_by_const!( + 1000 * 1000, + DEFAULT_TICKS_PER_SECOND + ))) } } diff --git a/sdk/src/process_instruction.rs b/sdk/src/process_instruction.rs index 3e6063145d..05e94d82ba 100644 --- a/sdk/src/process_instruction.rs +++ b/sdk/src/process_instruction.rs @@ -1,17 +1,14 @@ use solana_sdk::{ - account::Account, - feature_set::{ - bpf_compute_budget_balancing, max_cpi_instruction_size_ipv6_mtu, max_invoke_depth_4, - max_program_call_depth_64, pubkey_log_syscall_enabled, FeatureSet, - }, + account::AccountSharedData, instruction::{CompiledInstruction, Instruction, InstructionError}, keyed_account::KeyedAccount, message::Message, pubkey::Pubkey, + sysvar::Sysvar, }; use std::{cell::RefCell, fmt::Debug, rc::Rc, sync::Arc}; -// Prototype of a native loader entry point +/// Prototype of a native loader entry point /// /// program_id: Program ID of the currently executing program /// keyed_accounts: Accounts passed as part of the instruction @@ -40,7 +37,7 @@ pub trait InvokeContext { &mut self, message: &Message, instruction: &CompiledInstruction, - accounts: &[Rc>], + accounts: &[Rc>], caller_pivileges: Option<&[bool]>, ) -> Result<(), InstructionError>; /// Get the program ID of the currently executing program @@ -63,7 +60,17 @@ pub trait 
InvokeContext { /// Get the bank's active feature set fn is_feature_active(&self, feature_id: &Pubkey) -> bool; /// Get an account from a pre-account - fn get_account(&self, pubkey: &Pubkey) -> Option>; + fn get_account(&self, pubkey: &Pubkey) -> Option>>; + /// Update timing + fn update_timing( + &mut self, + serialize_us: u64, + create_vm_us: u64, + execute_us: u64, + deserialize_us: u64, + ); + /// Get sysvar data + fn get_sysvar_data(&self, id: &Pubkey) -> Option>>; } /// Convenience macro to log a message with an `Rc>` @@ -96,6 +103,21 @@ macro_rules! ic_msg { }; } +pub fn get_sysvar( + invoke_context: &dyn InvokeContext, + id: &Pubkey, +) -> Result { + let sysvar_data = invoke_context.get_sysvar_data(id).ok_or_else(|| { + ic_msg!(invoke_context, "Unable to get sysvar {}", id); + InstructionError::UnsupportedSysvar + })?; + + bincode::deserialize(&sysvar_data).map_err(|err| { + ic_msg!(invoke_context, "Unable to get sysvar {}: {:?}", id, err); + InstructionError::UnsupportedSysvar + }) +} + #[derive(Clone, Copy, Debug, AbiExample)] pub struct BpfComputeBudget { /// Number of compute units that an instruction is allowed. 
Compute units @@ -126,68 +148,32 @@ pub struct BpfComputeBudget { pub max_cpi_instruction_size: usize, /// Number of account data bytes per conpute unit charged during a cross-program invocation pub cpi_bytes_per_unit: u64, + /// Base number of compute units consumed to get a sysvar + pub sysvar_base_cost: u64, } impl Default for BpfComputeBudget { fn default() -> Self { - Self::new(&FeatureSet::all_enabled()) + Self::new() } } impl BpfComputeBudget { - pub fn new(feature_set: &FeatureSet) -> Self { - let mut bpf_compute_budget = - // Original + pub fn new() -> Self { BpfComputeBudget { - max_units: 100_000, - log_units: 0, - log_64_units: 0, - create_program_address_units: 0, - invoke_units: 0, - max_invoke_depth: 1, + max_units: 200_000, + log_units: 100, + log_64_units: 100, + create_program_address_units: 1500, + invoke_units: 1000, + max_invoke_depth: 4, sha256_base_cost: 85, sha256_byte_cost: 1, - max_call_depth: 20, + max_call_depth: 64, stack_frame_size: 4_096, - log_pubkey_units: 0, - max_cpi_instruction_size: std::usize::MAX, - cpi_bytes_per_unit: 250, - }; - - if feature_set.is_active(&bpf_compute_budget_balancing::id()) { - bpf_compute_budget = BpfComputeBudget { - max_units: 200_000, - log_units: 100, - log_64_units: 100, - create_program_address_units: 1500, - invoke_units: 1000, - ..bpf_compute_budget - }; - } - if feature_set.is_active(&max_invoke_depth_4::id()) { - bpf_compute_budget = BpfComputeBudget { - max_invoke_depth: 4, - ..bpf_compute_budget - }; - } - - if feature_set.is_active(&max_program_call_depth_64::id()) { - bpf_compute_budget = BpfComputeBudget { - max_call_depth: 64, - ..bpf_compute_budget - }; - } - if feature_set.is_active(&pubkey_log_syscall_enabled::id()) { - bpf_compute_budget = BpfComputeBudget { - log_pubkey_units: 100, - ..bpf_compute_budget - }; - } - if feature_set.is_active(&max_cpi_instruction_size_ipv6_mtu::id()) { - bpf_compute_budget = BpfComputeBudget { - max_cpi_instruction_size: 1280, // IPv6 Min MTU size - 
..bpf_compute_budget - }; + log_pubkey_units: 100, + max_cpi_instruction_size: 1280, // IPv6 Min MTU size + cpi_bytes_per_unit: 250, // ~50MB at 200,000 units + sysvar_base_cost: 100, } - bpf_compute_budget } } @@ -312,7 +298,9 @@ pub struct MockInvokeContext { pub bpf_compute_budget: BpfComputeBudget, pub compute_meter: MockComputeMeter, pub programs: Vec<(Pubkey, ProcessInstructionWithContext)>, + pub accounts: Vec<(Pubkey, Rc>)>, pub invoke_depth: usize, + pub sysvars: Vec<(Pubkey, Option>>)>, } impl Default for MockInvokeContext { fn default() -> Self { @@ -324,17 +312,35 @@ impl Default for MockInvokeContext { remaining: std::i64::MAX as u64, }, programs: vec![], + accounts: vec![], invoke_depth: 0, + sysvars: vec![], } } } + +pub fn mock_set_sysvar( + mock_invoke_context: &mut MockInvokeContext, + id: Pubkey, + sysvar: T, +) -> Result<(), InstructionError> { + let mut data = Vec::with_capacity(T::size_of()); + + bincode::serialize_into(&mut data, &sysvar).map_err(|err| { + ic_msg!(mock_invoke_context, "Unable to serialize sysvar: {:?}", err); + InstructionError::GenericError + })?; + mock_invoke_context.sysvars.push((id, Some(Rc::new(data)))); + Ok(()) +} + impl InvokeContext for MockInvokeContext { fn push(&mut self, _key: &Pubkey) -> Result<(), InstructionError> { - self.invoke_depth += 1; + self.invoke_depth = self.invoke_depth.saturating_add(1); Ok(()) } fn pop(&mut self) { - self.invoke_depth -= 1; + self.invoke_depth = self.invoke_depth.saturating_sub(1); } fn invoke_depth(&self) -> usize { self.invoke_depth @@ -343,7 +349,7 @@ impl InvokeContext for MockInvokeContext { &mut self, _message: &Message, _instruction: &CompiledInstruction, - _accounts: &[Rc>], + _accounts: &[Rc>], _caller_pivileges: Option<&[bool]>, ) -> Result<(), InstructionError> { Ok(()) @@ -371,7 +377,25 @@ impl InvokeContext for MockInvokeContext { fn is_feature_active(&self, _feature_id: &Pubkey) -> bool { true } - fn get_account(&self, _pubkey: &Pubkey) -> Option> { + fn 
get_account(&self, pubkey: &Pubkey) -> Option>> { + for (key, account) in self.accounts.iter() { + if key == pubkey { + return Some(account.clone()); + } + } None } + fn update_timing( + &mut self, + _serialize_us: u64, + _create_vm_us: u64, + _execute_us: u64, + _deserialize_us: u64, + ) { + } + fn get_sysvar_data(&self, id: &Pubkey) -> Option>> { + self.sysvars + .iter() + .find_map(|(key, sysvar)| if id == key { sysvar.clone() } else { None }) + } } diff --git a/sdk/src/recent_blockhashes_account.rs b/sdk/src/recent_blockhashes_account.rs index d9a74089a3..58a0694b47 100644 --- a/sdk/src/recent_blockhashes_account.rs +++ b/sdk/src/recent_blockhashes_account.rs @@ -1,6 +1,6 @@ use crate::account::{ - create_account_with_fields, to_account, Account, InheritableAccountFields, - DUMMY_INHERITABLE_ACCOUNT_FIELDS, + create_account_shared_data_with_fields, to_account, AccountSharedData, + InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, }; use crate::clock::INITIAL_RENT_EPOCH; use solana_program::sysvar::recent_blockhashes::{ @@ -8,7 +8,10 @@ use solana_program::sysvar::recent_blockhashes::{ }; use std::{collections::BinaryHeap, iter::FromIterator}; -pub fn update_account<'a, I>(account: &mut Account, recent_blockhash_iter: I) -> Option<()> +pub fn update_account<'a, I>( + account: &mut AccountSharedData, + recent_blockhash_iter: I, +) -> Option<()> where I: IntoIterator>, { @@ -23,7 +26,7 @@ where since = "1.5.17", note = "Please use `create_account_with_data_for_test` instead" )] -pub fn create_account_with_data<'a, I>(lamports: u64, recent_blockhash_iter: I) -> Account +pub fn create_account_with_data<'a, I>(lamports: u64, recent_blockhash_iter: I) -> AccountSharedData where I: IntoIterator>, { @@ -33,17 +36,19 @@ where pub fn create_account_with_data_and_fields<'a, I>( recent_blockhash_iter: I, fields: InheritableAccountFields, -) -> Account +) -> AccountSharedData where I: IntoIterator>, { - let mut account = - 
create_account_with_fields::(&RecentBlockhashes::default(), fields); + let mut account = create_account_shared_data_with_fields::( + &RecentBlockhashes::default(), + fields, + ); update_account(&mut account, recent_blockhash_iter).unwrap(); account } -pub fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> Account +pub fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> AccountSharedData where I: IntoIterator>, { @@ -64,7 +69,7 @@ mod tests { #[test] fn test_create_account_empty() { let account = create_account_with_data_for_test(vec![].into_iter()); - let recent_blockhashes = from_account::(&account).unwrap(); + let recent_blockhashes = from_account::(&account).unwrap(); assert_eq!(recent_blockhashes, RecentBlockhashes::default()); } @@ -75,7 +80,7 @@ mod tests { let account = create_account_with_data_for_test( vec![IterItem(0u64, &def_hash, &def_fees); MAX_ENTRIES].into_iter(), ); - let recent_blockhashes = from_account::(&account).unwrap(); + let recent_blockhashes = from_account::(&account).unwrap(); assert_eq!(recent_blockhashes.len(), MAX_ENTRIES); } @@ -86,7 +91,7 @@ mod tests { let account = create_account_with_data_for_test( vec![IterItem(0u64, &def_hash, &def_fees); MAX_ENTRIES + 1].into_iter(), ); - let recent_blockhashes = from_account::(&account).unwrap(); + let recent_blockhashes = from_account::(&account).unwrap(); assert_eq!(recent_blockhashes.len(), MAX_ENTRIES); } @@ -110,7 +115,7 @@ mod tests { .iter() .map(|(i, hash)| IterItem(*i, hash, &def_fees)), ); - let recent_blockhashes = from_account::(&account).unwrap(); + let recent_blockhashes = from_account::(&account).unwrap(); let mut unsorted_recent_blockhashes: Vec<_> = unsorted_blocks .iter() diff --git a/sdk/src/recent_evm_blockhashes_account.rs b/sdk/src/recent_evm_blockhashes_account.rs index dc36bf9ac7..3d8e7996f3 100644 --- a/sdk/src/recent_evm_blockhashes_account.rs +++ b/sdk/src/recent_evm_blockhashes_account.rs @@ -1,11 +1,13 @@ -use 
crate::account::{create_account_with_fields, to_account, Account, InheritableAccountFields}; +use crate::account::{ + create_account_shared_data_with_fields, to_account, AccountSharedData, InheritableAccountFields, +}; use crate::hash::Hash; use solana_program::sysvar::recent_evm_blockhashes::{RecentBlockhashes, MAX_ENTRIES}; pub fn update_account( - account: &mut Account, + account: &mut AccountSharedData, recent_blockhashes: [Hash; MAX_ENTRIES], ) -> Option<()> { let recent_blockhashes: RecentBlockhashes = RecentBlockhashes(recent_blockhashes); @@ -15,9 +17,11 @@ pub fn update_account( pub fn create_account_with_data_and_fields( fields: InheritableAccountFields, recent_blockhashes: [Hash; MAX_ENTRIES], -) -> Account { - let mut account = - create_account_with_fields::(&RecentBlockhashes::default(), fields); +) -> AccountSharedData { + let mut account = create_account_shared_data_with_fields::( + &RecentBlockhashes::default(), + fields, + ); update_account(&mut account, recent_blockhashes).unwrap(); account } @@ -28,6 +32,7 @@ mod tests { use crate::account::from_account; use crate::clock::INITIAL_RENT_EPOCH; use solana_program::hash::{Hash, HASH_BYTES}; + #[test] fn test_create_account() { let mut blocks: [Hash; MAX_ENTRIES] = [Hash::default(); MAX_ENTRIES]; @@ -44,7 +49,7 @@ mod tests { }); let account = create_account_with_data_and_fields((42, INITIAL_RENT_EPOCH), blocks); - let recent_blockhashes = from_account::(&account).unwrap(); + let recent_blockhashes = from_account::(&account).unwrap(); assert_eq!(recent_blockhashes.0, blocks); } diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index e4e773c536..24d2c0d64b 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -1,3 +1,4 @@ +#![allow(clippy::integer_arithmetic)] #![cfg(feature = "full")] use crate::instruction::Instruction; diff --git a/sdk/src/shred_version.rs b/sdk/src/shred_version.rs index de92687335..d253f30a33 100644 --- 
a/sdk/src/shred_version.rs +++ b/sdk/src/shred_version.rs @@ -15,6 +15,8 @@ pub fn version_from_hash(hash: &Hash) -> u16 { .for_each(|(accum, seed)| *accum ^= *seed) }); // convert accum into a u16 + // Because accum[0] is a u8, 8bit left shift of the u16 can never overflow + #[allow(clippy::integer_arithmetic)] let version = ((accum[0] as u16) << 8) | accum[1] as u16; // ensure version is never zero, to avoid looking like an uninitialized version diff --git a/sdk/src/signature.rs b/sdk/src/signature.rs index dfb779869a..725e6ffd88 100644 --- a/sdk/src/signature.rs +++ b/sdk/src/signature.rs @@ -1,62 +1,18 @@ //! The `signature` module provides functionality for public, and private keys. #![cfg(feature = "full")] -use crate::{pubkey::Pubkey, transaction::TransactionError}; -use ed25519_dalek::Signer as DalekSigner; +use crate::pubkey::Pubkey; use generic_array::{typenum::U64, GenericArray}; -use hmac::Hmac; -use itertools::Itertools; -use rand::{rngs::OsRng, CryptoRng, RngCore}; use std::{ borrow::{Borrow, Cow}, convert::TryInto, - error, fmt, - fs::{self, File, OpenOptions}, - io::{Read, Write}, - mem, - path::Path, + fmt, mem, str::FromStr, }; use thiserror::Error; -#[derive(Debug)] -pub struct Keypair(ed25519_dalek::Keypair); - -impl Keypair { - pub fn generate(csprng: &mut R) -> Self - where - R: CryptoRng + RngCore, - { - Self(ed25519_dalek::Keypair::generate(csprng)) - } - - /// Return a new ED25519 keypair - pub fn new() -> Self { - let mut rng = OsRng::default(); - Self::generate(&mut rng) - } - - pub fn from_bytes(bytes: &[u8]) -> Result { - ed25519_dalek::Keypair::from_bytes(bytes).map(Self) - } - - pub fn to_bytes(&self) -> [u8; 64] { - self.0.to_bytes() - } - - pub fn from_base58_string(s: &str) -> Self { - Self::from_bytes(&bs58::decode(s).into_vec().unwrap()).unwrap() - } - - pub fn to_base58_string(&self) -> String { - // Remove .iter() once we're rust 1.47+ - bs58::encode(&self.0.to_bytes().iter()).into_string() - } - - pub fn secret(&self) -> 
&ed25519_dalek::SecretKey { - &self.0.secret - } -} +// legacy module paths +pub use crate::signer::{keypair::*, null_signer::*, presigner::*, *}; /// Number of bytes in a signature pub const SIGNATURE_BYTES: usize = 64; @@ -157,346 +113,9 @@ impl FromStr for Signature { } } -pub trait Signer { - fn pubkey(&self) -> Pubkey { - self.try_pubkey().unwrap_or_default() - } - fn try_pubkey(&self) -> Result; - fn sign_message(&self, message: &[u8]) -> Signature { - self.try_sign_message(message).unwrap_or_default() - } - fn try_sign_message(&self, message: &[u8]) -> Result; -} - -impl PartialEq for dyn Signer { - fn eq(&self, other: &dyn Signer) -> bool { - self.pubkey() == other.pubkey() - } -} - -impl std::fmt::Debug for dyn Signer { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "Signer: {:?}", self.pubkey()) - } -} - -/// Remove duplicates signers while preserving order. O(n²) -pub fn unique_signers(signers: Vec<&dyn Signer>) -> Vec<&dyn Signer> { - signers.into_iter().unique_by(|s| s.pubkey()).collect() -} - -impl Signer for Keypair { - /// Return the public key for the given keypair - fn pubkey(&self) -> Pubkey { - Pubkey::new(self.0.public.as_ref()) - } - - fn try_pubkey(&self) -> Result { - Ok(self.pubkey()) - } - - fn sign_message(&self, message: &[u8]) -> Signature { - Signature::new(&self.0.sign(message).to_bytes()) - } - - fn try_sign_message(&self, message: &[u8]) -> Result { - Ok(self.sign_message(message)) - } -} - -impl PartialEq for Keypair -where - T: Signer, -{ - fn eq(&self, other: &T) -> bool { - self.pubkey() == other.pubkey() - } -} - -impl From for Box -where - T: Signer + 'static, -{ - fn from(signer: T) -> Self { - Box::new(signer) - } -} - -#[derive(Debug, Error, PartialEq)] -pub enum SignerError { - #[error("keypair-pubkey mismatch")] - KeypairPubkeyMismatch, - - #[error("not enough signers")] - NotEnoughSigners, - - #[error("transaction error")] - TransactionError(#[from] TransactionError), - - 
#[error("custom error: {0}")] - Custom(String), - - // Presigner-specific Errors - #[error("presigner error")] - PresignerError(#[from] PresignerError), - - // Remote Keypair-specific Errors - #[error("connection error: {0}")] - Connection(String), - - #[error("invalid input: {0}")] - InvalidInput(String), - - #[error("no device found")] - NoDeviceFound, - - #[error("{0}")] - Protocol(String), - - #[error("{0}")] - UserCancel(String), -} - -#[derive(Clone, Debug, Default)] -pub struct Presigner { - pubkey: Pubkey, - signature: Signature, -} - -impl Presigner { - pub fn new(pubkey: &Pubkey, signature: &Signature) -> Self { - Self { - pubkey: *pubkey, - signature: *signature, - } - } -} - -#[derive(Debug, Error, PartialEq)] -pub enum PresignerError { - #[error("pre-generated signature cannot verify data")] - VerificationFailure, -} - -impl Signer for Presigner { - fn try_pubkey(&self) -> Result { - Ok(self.pubkey) - } - - fn try_sign_message(&self, message: &[u8]) -> Result { - if self.signature.verify(self.pubkey.as_ref(), message) { - Ok(self.signature) - } else { - Err(PresignerError::VerificationFailure.into()) - } - } -} - -impl PartialEq for Presigner -where - T: Signer, -{ - fn eq(&self, other: &T) -> bool { - self.pubkey() == other.pubkey() - } -} - -/// NullSigner - A `Signer` implementation that always produces `Signature::default()`. 
-/// Used as a placeholder for absentee signers whose 'Pubkey` is required to construct -/// the transaction -#[derive(Clone, Debug, Default)] -pub struct NullSigner { - pubkey: Pubkey, -} - -impl NullSigner { - pub fn new(pubkey: &Pubkey) -> Self { - Self { pubkey: *pubkey } - } -} - -impl Signer for NullSigner { - fn try_pubkey(&self) -> Result { - Ok(self.pubkey) - } - - fn try_sign_message(&self, _message: &[u8]) -> Result { - Ok(Signature::default()) - } -} - -impl PartialEq for NullSigner -where - T: Signer, -{ - fn eq(&self, other: &T) -> bool { - self.pubkey == other.pubkey() - } -} - -pub fn read_keypair(reader: &mut R) -> Result> { - let bytes: Vec = serde_json::from_reader(reader)?; - let dalek_keypair = ed25519_dalek::Keypair::from_bytes(&bytes) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?; - Ok(Keypair(dalek_keypair)) -} - -pub fn read_keypair_file>(path: F) -> Result> { - let mut file = File::open(path.as_ref())?; - read_keypair(&mut file) -} - -pub fn write_keypair( - keypair: &Keypair, - writer: &mut W, -) -> Result> { - let keypair_bytes = keypair.0.to_bytes(); - let serialized = serde_json::to_string(&keypair_bytes.to_vec())?; - writer.write_all(&serialized.clone().into_bytes())?; - Ok(serialized) -} - -pub fn write_keypair_file>( - keypair: &Keypair, - outfile: F, -) -> Result> { - let outfile = outfile.as_ref(); - - if let Some(outdir) = outfile.parent() { - fs::create_dir_all(outdir)?; - } - - let mut f = { - #[cfg(not(unix))] - { - OpenOptions::new() - } - #[cfg(unix)] - { - use std::os::unix::fs::OpenOptionsExt; - OpenOptions::new().mode(0o600) - } - } - .write(true) - .truncate(true) - .create(true) - .open(outfile)?; - - write_keypair(keypair, &mut f) -} - -pub fn keypair_from_seed(seed: &[u8]) -> Result> { - if seed.len() < ed25519_dalek::SECRET_KEY_LENGTH { - return Err("Seed is too short".into()); - } - let secret = ed25519_dalek::SecretKey::from_bytes(&seed[..ed25519_dalek::SECRET_KEY_LENGTH]) - 
.map_err(|e| e.to_string())?; - let public = ed25519_dalek::PublicKey::from(&secret); - let dalek_keypair = ed25519_dalek::Keypair { secret, public }; - Ok(Keypair(dalek_keypair)) -} - -pub fn keypair_from_seed_phrase_and_passphrase( - seed_phrase: &str, - passphrase: &str, -) -> Result> { - const PBKDF2_ROUNDS: u32 = 2048; - const PBKDF2_BYTES: usize = 64; - - let salt = format!("mnemonic{}", passphrase); - - let mut seed = vec![0u8; PBKDF2_BYTES]; - pbkdf2::pbkdf2::>( - seed_phrase.as_bytes(), - salt.as_bytes(), - PBKDF2_ROUNDS, - &mut seed, - ); - keypair_from_seed(&seed[..]) -} - #[cfg(test)] mod tests { use super::*; - use bip39::{Language, Mnemonic, MnemonicType, Seed}; - use std::mem; - - fn tmp_file_path(name: &str) -> String { - use std::env; - let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); - let keypair = Keypair::new(); - - format!("{}/tmp/{}-{}", out_dir, name, keypair.pubkey()) - } - - #[test] - fn test_write_keypair_file() { - let outfile = tmp_file_path("test_write_keypair_file.json"); - let serialized_keypair = write_keypair_file(&Keypair::new(), &outfile).unwrap(); - let keypair_vec: Vec = serde_json::from_str(&serialized_keypair).unwrap(); - assert!(Path::new(&outfile).exists()); - assert_eq!( - keypair_vec, - read_keypair_file(&outfile).unwrap().0.to_bytes().to_vec() - ); - - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - assert_eq!( - File::open(&outfile) - .expect("open") - .metadata() - .expect("metadata") - .permissions() - .mode() - & 0o777, - 0o600 - ); - } - - assert_eq!( - read_keypair_file(&outfile).unwrap().pubkey().as_ref().len(), - mem::size_of::() - ); - fs::remove_file(&outfile).unwrap(); - assert!(!Path::new(&outfile).exists()); - } - - #[test] - fn test_write_keypair_file_overwrite_ok() { - let outfile = tmp_file_path("test_write_keypair_file_overwrite_ok.json"); - - write_keypair_file(&Keypair::new(), &outfile).unwrap(); - write_keypair_file(&Keypair::new(), &outfile).unwrap(); - } - - 
#[test] - fn test_write_keypair_file_truncate() { - let outfile = tmp_file_path("test_write_keypair_file_truncate.json"); - - write_keypair_file(&Keypair::new(), &outfile).unwrap(); - read_keypair_file(&outfile).unwrap(); - - // Ensure outfile is truncated - { - let mut f = File::create(&outfile).unwrap(); - f.write_all(String::from_utf8([b'a'; 2048].to_vec()).unwrap().as_bytes()) - .unwrap(); - } - write_keypair_file(&Keypair::new(), &outfile).unwrap(); - read_keypair_file(&outfile).unwrap(); - } - - #[test] - fn test_keypair_from_seed() { - let good_seed = vec![0; 32]; - assert!(keypair_from_seed(&good_seed).is_ok()); - - let too_short_seed = vec![0; 31]; - assert!(keypair_from_seed(&too_short_seed).is_err()); - } - #[test] fn test_signature_fromstr() { let signature = Keypair::new().sign_message(&[0u8]); @@ -532,12 +151,7 @@ mod tests { // too long input string // longest valid encoding - let mut too_long: GenericArray = GenericArray::default(); - // *sigh* - for i in &mut too_long { - *i = 255u8; - } - let mut too_long = bs58::encode(too_long).into_string(); + let mut too_long = bs58::encode(&[255u8; SIGNATURE_BYTES]).into_string(); // and one to grow on too_long.push('1'); assert_eq!( @@ -546,73 +160,6 @@ mod tests { ); } - #[test] - fn test_keypair_from_seed_phrase_and_passphrase() { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let passphrase = "42"; - let seed = Seed::new(&mnemonic, passphrase); - let expected_keypair = keypair_from_seed(seed.as_bytes()).unwrap(); - let keypair = - keypair_from_seed_phrase_and_passphrase(mnemonic.phrase(), passphrase).unwrap(); - assert_eq!(keypair.pubkey(), expected_keypair.pubkey()); - } - - #[test] - fn test_keypair() { - let keypair = keypair_from_seed(&[0u8; 32]).unwrap(); - let pubkey = keypair.pubkey(); - let data = [1u8]; - let sig = keypair.sign_message(&data); - - // Signer - assert_eq!(keypair.try_pubkey().unwrap(), pubkey); - assert_eq!(keypair.pubkey(), pubkey); - 
assert_eq!(keypair.try_sign_message(&data).unwrap(), sig); - assert_eq!(keypair.sign_message(&data), sig); - - // PartialEq - let keypair2 = keypair_from_seed(&[0u8; 32]).unwrap(); - assert_eq!(keypair, keypair2); - } - - #[test] - fn test_presigner() { - let keypair = keypair_from_seed(&[0u8; 32]).unwrap(); - let pubkey = keypair.pubkey(); - let data = [1u8]; - let sig = keypair.sign_message(&data); - - // Signer - let presigner = Presigner::new(&pubkey, &sig); - assert_eq!(presigner.try_pubkey().unwrap(), pubkey); - assert_eq!(presigner.pubkey(), pubkey); - assert_eq!(presigner.try_sign_message(&data).unwrap(), sig); - assert_eq!(presigner.sign_message(&data), sig); - let bad_data = [2u8]; - assert!(presigner.try_sign_message(&bad_data).is_err()); - assert_eq!(presigner.sign_message(&bad_data), Signature::default()); - - // PartialEq - assert_eq!(presigner, keypair); - assert_eq!(keypair, presigner); - let presigner2 = Presigner::new(&pubkey, &sig); - assert_eq!(presigner, presigner2); - } - - fn pubkeys(signers: &[&dyn Signer]) -> Vec { - signers.iter().map(|x| x.pubkey()).collect() - } - - #[test] - fn test_unique_signers() { - let alice = Keypair::new(); - let bob = Keypair::new(); - assert_eq!( - pubkeys(&unique_signers(vec![&alice, &bob, &alice])), - pubkeys(&[&alice, &bob]) - ); - } - #[test] fn test_off_curve_pubkey_verify_fails() { // Golden point off the ed25519 curve diff --git a/sdk/src/signer/keypair.rs b/sdk/src/signer/keypair.rs new file mode 100644 index 0000000000..aa6f463256 --- /dev/null +++ b/sdk/src/signer/keypair.rs @@ -0,0 +1,326 @@ +#![cfg(feature = "full")] + +use { + crate::{ + derivation_path::DerivationPath, + pubkey::Pubkey, + signature::Signature, + signer::{Signer, SignerError}, + }, + ed25519_dalek::Signer as DalekSigner, + ed25519_dalek_bip32::Error as Bip32Error, + hmac::Hmac, + rand::{rngs::OsRng, CryptoRng, RngCore}, + std::{ + error, + fs::{self, File, OpenOptions}, + io::{Read, Write}, + path::Path, + }, +}; + +/// A vanilla 
Ed25519 key pair +#[derive(Debug)] +pub struct Keypair(ed25519_dalek::Keypair); + +impl Keypair { + /// Constructs a new, random `Keypair` using a caller-provided RNG + pub fn generate(csprng: &mut R) -> Self + where + R: CryptoRng + RngCore, + { + Self(ed25519_dalek::Keypair::generate(csprng)) + } + + /// Constructs a new, random `Keypair` using `OsRng` + pub fn new() -> Self { + let mut rng = OsRng::default(); + Self::generate(&mut rng) + } + + /// Recovers a `Keypair` from a byte array + pub fn from_bytes(bytes: &[u8]) -> Result { + ed25519_dalek::Keypair::from_bytes(bytes).map(Self) + } + + /// Returns this `Keypair` as a byte array + pub fn to_bytes(&self) -> [u8; 64] { + self.0.to_bytes() + } + + /// Recovers a `Keypair` from a base58-encoded string + pub fn from_base58_string(s: &str) -> Self { + Self::from_bytes(&bs58::decode(s).into_vec().unwrap()).unwrap() + } + + /// Returns this `Keypair` as a base58-encoded string + pub fn to_base58_string(&self) -> String { + bs58::encode(&self.0.to_bytes()).into_string() + } + + /// Gets this `Keypair`'s SecretKey + pub fn secret(&self) -> &ed25519_dalek::SecretKey { + &self.0.secret + } +} + +impl Signer for Keypair { + fn pubkey(&self) -> Pubkey { + Pubkey::new(self.0.public.as_ref()) + } + + fn try_pubkey(&self) -> Result { + Ok(self.pubkey()) + } + + fn sign_message(&self, message: &[u8]) -> Signature { + Signature::new(&self.0.sign(message).to_bytes()) + } + + fn try_sign_message(&self, message: &[u8]) -> Result { + Ok(self.sign_message(message)) + } +} + +impl PartialEq for Keypair +where + T: Signer, +{ + fn eq(&self, other: &T) -> bool { + self.pubkey() == other.pubkey() + } +} + +/// Reads a JSON-encoded `Keypair` from a `Reader` implementor +pub fn read_keypair(reader: &mut R) -> Result> { + let bytes: Vec = serde_json::from_reader(reader)?; + let dalek_keypair = ed25519_dalek::Keypair::from_bytes(&bytes) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?; + 
Ok(Keypair(dalek_keypair)) +} + +/// Reads a `Keypair` from a file +pub fn read_keypair_file>(path: F) -> Result> { + let mut file = File::open(path.as_ref())?; + read_keypair(&mut file) +} + +/// Writes a `Keypair` to a `Write` implementor with JSON-encoding +pub fn write_keypair( + keypair: &Keypair, + writer: &mut W, +) -> Result> { + let keypair_bytes = keypair.0.to_bytes(); + let serialized = serde_json::to_string(&keypair_bytes.to_vec())?; + writer.write_all(&serialized.clone().into_bytes())?; + Ok(serialized) +} + +/// Writes a `Keypair` to a file with JSON-encoding +pub fn write_keypair_file>( + keypair: &Keypair, + outfile: F, +) -> Result> { + let outfile = outfile.as_ref(); + + if let Some(outdir) = outfile.parent() { + fs::create_dir_all(outdir)?; + } + + let mut f = { + #[cfg(not(unix))] + { + OpenOptions::new() + } + #[cfg(unix)] + { + use std::os::unix::fs::OpenOptionsExt; + OpenOptions::new().mode(0o600) + } + } + .write(true) + .truncate(true) + .create(true) + .open(outfile)?; + + write_keypair(keypair, &mut f) +} + +/// Constructs a `Keypair` from caller-provided seed entropy +pub fn keypair_from_seed(seed: &[u8]) -> Result> { + if seed.len() < ed25519_dalek::SECRET_KEY_LENGTH { + return Err("Seed is too short".into()); + } + let secret = ed25519_dalek::SecretKey::from_bytes(&seed[..ed25519_dalek::SECRET_KEY_LENGTH]) + .map_err(|e| e.to_string())?; + let public = ed25519_dalek::PublicKey::from(&secret); + let dalek_keypair = ed25519_dalek::Keypair { secret, public }; + Ok(Keypair(dalek_keypair)) +} + +/// Generates a Keypair using Bip32 Hierarchical Derivation if derivation-path is provided; +/// otherwise generates the base Bip44 Solana keypair from the seed +pub fn keypair_from_seed_and_derivation_path( + seed: &[u8], + derivation_path: Option, +) -> Result> { + let derivation_path = derivation_path.unwrap_or_else(DerivationPath::default); + bip32_derived_keypair(seed, derivation_path).map_err(|err| err.to_string().into()) +} + +/// Generates a 
Keypair using Bip32 Hierarchical Derivation +fn bip32_derived_keypair( + seed: &[u8], + derivation_path: DerivationPath, +) -> Result { + let extended = ed25519_dalek_bip32::ExtendedSecretKey::from_seed(seed) + .and_then(|extended| extended.derive(&derivation_path))?; + let extended_public_key = extended.public_key(); + Ok(Keypair(ed25519_dalek::Keypair { + secret: extended.secret_key, + public: extended_public_key, + })) +} + +pub fn generate_seed_from_seed_phrase_and_passphrase( + seed_phrase: &str, + passphrase: &str, +) -> Vec { + const PBKDF2_ROUNDS: u32 = 2048; + const PBKDF2_BYTES: usize = 64; + + let salt = format!("mnemonic{}", passphrase); + + let mut seed = vec![0u8; PBKDF2_BYTES]; + pbkdf2::pbkdf2::>( + seed_phrase.as_bytes(), + salt.as_bytes(), + PBKDF2_ROUNDS, + &mut seed, + ); + seed +} + +pub fn keypair_from_seed_phrase_and_passphrase( + seed_phrase: &str, + passphrase: &str, +) -> Result> { + keypair_from_seed(&generate_seed_from_seed_phrase_and_passphrase( + seed_phrase, + passphrase, + )) +} + +#[cfg(test)] +mod tests { + use { + super::*, + bip39::{Language, Mnemonic, MnemonicType, Seed}, + std::mem, + }; + + fn tmp_file_path(name: &str) -> String { + use std::env; + let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); + let keypair = Keypair::new(); + + format!("{}/tmp/{}-{}", out_dir, name, keypair.pubkey()) + } + + #[test] + fn test_write_keypair_file() { + let outfile = tmp_file_path("test_write_keypair_file.json"); + let serialized_keypair = write_keypair_file(&Keypair::new(), &outfile).unwrap(); + let keypair_vec: Vec = serde_json::from_str(&serialized_keypair).unwrap(); + assert!(Path::new(&outfile).exists()); + assert_eq!( + keypair_vec, + read_keypair_file(&outfile).unwrap().0.to_bytes().to_vec() + ); + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + assert_eq!( + File::open(&outfile) + .expect("open") + .metadata() + .expect("metadata") + .permissions() + .mode() + & 0o777, + 0o600 + ); + } + + 
assert_eq!( + read_keypair_file(&outfile).unwrap().pubkey().as_ref().len(), + mem::size_of::() + ); + fs::remove_file(&outfile).unwrap(); + assert!(!Path::new(&outfile).exists()); + } + + #[test] + fn test_write_keypair_file_overwrite_ok() { + let outfile = tmp_file_path("test_write_keypair_file_overwrite_ok.json"); + + write_keypair_file(&Keypair::new(), &outfile).unwrap(); + write_keypair_file(&Keypair::new(), &outfile).unwrap(); + } + + #[test] + fn test_write_keypair_file_truncate() { + let outfile = tmp_file_path("test_write_keypair_file_truncate.json"); + + write_keypair_file(&Keypair::new(), &outfile).unwrap(); + read_keypair_file(&outfile).unwrap(); + + // Ensure outfile is truncated + { + let mut f = File::create(&outfile).unwrap(); + f.write_all(String::from_utf8([b'a'; 2048].to_vec()).unwrap().as_bytes()) + .unwrap(); + } + write_keypair_file(&Keypair::new(), &outfile).unwrap(); + read_keypair_file(&outfile).unwrap(); + } + + #[test] + fn test_keypair_from_seed() { + let good_seed = vec![0; 32]; + assert!(keypair_from_seed(&good_seed).is_ok()); + + let too_short_seed = vec![0; 31]; + assert!(keypair_from_seed(&too_short_seed).is_err()); + } + + #[test] + fn test_keypair_from_seed_phrase_and_passphrase() { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let passphrase = "42"; + let seed = Seed::new(&mnemonic, passphrase); + let expected_keypair = keypair_from_seed(seed.as_bytes()).unwrap(); + let keypair = + keypair_from_seed_phrase_and_passphrase(mnemonic.phrase(), passphrase).unwrap(); + assert_eq!(keypair.pubkey(), expected_keypair.pubkey()); + } + + #[test] + fn test_keypair() { + let keypair = keypair_from_seed(&[0u8; 32]).unwrap(); + let pubkey = keypair.pubkey(); + let data = [1u8]; + let sig = keypair.sign_message(&data); + + // Signer + assert_eq!(keypair.try_pubkey().unwrap(), pubkey); + assert_eq!(keypair.pubkey(), pubkey); + assert_eq!(keypair.try_sign_message(&data).unwrap(), sig); + 
assert_eq!(keypair.sign_message(&data), sig); + + // PartialEq + let keypair2 = keypair_from_seed(&[0u8; 32]).unwrap(); + assert_eq!(keypair, keypair2); + } +} diff --git a/sdk/src/signer/mod.rs b/sdk/src/signer/mod.rs new file mode 100644 index 0000000000..bf7dcc52e0 --- /dev/null +++ b/sdk/src/signer/mod.rs @@ -0,0 +1,117 @@ +#![cfg(feature = "full")] + +use { + crate::{ + pubkey::Pubkey, + signature::{PresignerError, Signature}, + transaction::TransactionError, + }, + itertools::Itertools, + thiserror::Error, +}; + +pub mod keypair; +pub mod null_signer; +pub mod presigner; +pub mod signers; + +#[derive(Debug, Error, PartialEq)] +pub enum SignerError { + #[error("keypair-pubkey mismatch")] + KeypairPubkeyMismatch, + + #[error("not enough signers")] + NotEnoughSigners, + + #[error("transaction error")] + TransactionError(#[from] TransactionError), + + #[error("custom error: {0}")] + Custom(String), + + // Presigner-specific Errors + #[error("presigner error")] + PresignerError(#[from] PresignerError), + + // Remote Keypair-specific Errors + #[error("connection error: {0}")] + Connection(String), + + #[error("invalid input: {0}")] + InvalidInput(String), + + #[error("no device found")] + NoDeviceFound, + + #[error("{0}")] + Protocol(String), + + #[error("{0}")] + UserCancel(String), +} + +/// The `Signer` trait declares operations that all digital signature providers +/// must support. It is the primary interface by which signers are specified in +/// `Transaction` signing interfaces +pub trait Signer { + /// Infallibly gets the implementor's public key. Returns the all-zeros + /// `Pubkey` if the implementor has none. + fn pubkey(&self) -> Pubkey { + self.try_pubkey().unwrap_or_default() + } + /// Fallibly gets the implementor's public key + fn try_pubkey(&self) -> Result; + /// Infallibly produces an Ed25519 signature over the provided `message` + /// bytes. Returns the all-zeros `Signature` if signing is not possible. 
+ fn sign_message(&self, message: &[u8]) -> Signature { + self.try_sign_message(message).unwrap_or_default() + } + /// Fallibly produces an Ed25519 signature over the provided `message` bytes. + fn try_sign_message(&self, message: &[u8]) -> Result; +} + +impl From for Box +where + T: Signer + 'static, +{ + fn from(signer: T) -> Self { + Box::new(signer) + } +} + +impl PartialEq for dyn Signer { + fn eq(&self, other: &dyn Signer) -> bool { + self.pubkey() == other.pubkey() + } +} + +impl std::fmt::Debug for dyn Signer { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "Signer: {:?}", self.pubkey()) + } +} + +/// Removes duplicate signers while preserving order. O(n²) +pub fn unique_signers(signers: Vec<&dyn Signer>) -> Vec<&dyn Signer> { + signers.into_iter().unique_by(|s| s.pubkey()).collect() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::signer::keypair::Keypair; + + fn pubkeys(signers: &[&dyn Signer]) -> Vec { + signers.iter().map(|x| x.pubkey()).collect() + } + + #[test] + fn test_unique_signers() { + let alice = Keypair::new(); + let bob = Keypair::new(); + assert_eq!( + pubkeys(&unique_signers(vec![&alice, &bob, &alice])), + pubkeys(&[&alice, &bob]) + ); + } +} diff --git a/sdk/src/signer/null_signer.rs b/sdk/src/signer/null_signer.rs new file mode 100644 index 0000000000..d5b5ccd38b --- /dev/null +++ b/sdk/src/signer/null_signer.rs @@ -0,0 +1,40 @@ +#![cfg(feature = "full")] + +use crate::{ + pubkey::Pubkey, + signature::Signature, + signer::{Signer, SignerError}, +}; + +/// NullSigner - A `Signer` implementation that always produces `Signature::default()`. 
+/// Used as a placeholder for absentee signers whose `Pubkey` is required to construct +/// the transaction +#[derive(Clone, Debug, Default)] +pub struct NullSigner { + pubkey: Pubkey, +} + +impl NullSigner { + pub fn new(pubkey: &Pubkey) -> Self { + Self { pubkey: *pubkey } + } +} + +impl Signer for NullSigner { + fn try_pubkey(&self) -> Result { + Ok(self.pubkey) + } + + fn try_sign_message(&self, _message: &[u8]) -> Result { + Ok(Signature::default()) + } +} + +impl PartialEq for NullSigner +where + T: Signer, +{ + fn eq(&self, other: &T) -> bool { + self.pubkey == other.pubkey() + } +} diff --git a/sdk/src/signer/presigner.rs b/sdk/src/signer/presigner.rs new file mode 100644 index 0000000000..4b9c12c11a --- /dev/null +++ b/sdk/src/signer/presigner.rs @@ -0,0 +1,88 @@ +#![cfg(feature = "full")] + +use { + crate::{ + pubkey::Pubkey, + signature::Signature, + signer::{Signer, SignerError}, + }, + thiserror::Error, +}; + +/// A `Signer` implementation that represents a `Signature` that has been +/// constructed externally. 
Performs a signature verification against the +/// expected message upon `sign()` requests to affirm its relationship to +/// the `message` bytes +#[derive(Clone, Debug, Default)] +pub struct Presigner { + pubkey: Pubkey, + signature: Signature, +} + +impl Presigner { + pub fn new(pubkey: &Pubkey, signature: &Signature) -> Self { + Self { + pubkey: *pubkey, + signature: *signature, + } + } +} + +#[derive(Debug, Error, PartialEq)] +pub enum PresignerError { + #[error("pre-generated signature cannot verify data")] + VerificationFailure, +} + +impl Signer for Presigner { + fn try_pubkey(&self) -> Result { + Ok(self.pubkey) + } + + fn try_sign_message(&self, message: &[u8]) -> Result { + if self.signature.verify(self.pubkey.as_ref(), message) { + Ok(self.signature) + } else { + Err(PresignerError::VerificationFailure.into()) + } + } +} + +impl PartialEq for Presigner +where + T: Signer, +{ + fn eq(&self, other: &T) -> bool { + self.pubkey() == other.pubkey() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::signer::keypair::keypair_from_seed; + + #[test] + fn test_presigner() { + let keypair = keypair_from_seed(&[0u8; 32]).unwrap(); + let pubkey = keypair.pubkey(); + let data = [1u8]; + let sig = keypair.sign_message(&data); + + // Signer + let presigner = Presigner::new(&pubkey, &sig); + assert_eq!(presigner.try_pubkey().unwrap(), pubkey); + assert_eq!(presigner.pubkey(), pubkey); + assert_eq!(presigner.try_sign_message(&data).unwrap(), sig); + assert_eq!(presigner.sign_message(&data), sig); + let bad_data = [2u8]; + assert!(presigner.try_sign_message(&bad_data).is_err()); + assert_eq!(presigner.sign_message(&bad_data), Signature::default()); + + // PartialEq + assert_eq!(presigner, keypair); + assert_eq!(keypair, presigner); + let presigner2 = Presigner::new(&pubkey, &sig); + assert_eq!(presigner, presigner2); + } +} diff --git a/sdk/src/signers.rs b/sdk/src/signer/signers.rs similarity index 98% rename from sdk/src/signers.rs rename to 
sdk/src/signer/signers.rs index e25b6a059d..abc0772096 100644 --- a/sdk/src/signers.rs +++ b/sdk/src/signer/signers.rs @@ -4,6 +4,7 @@ use crate::{ signature::{Signature, Signer, SignerError}, }; +/// Convenience trait for working with mixed collections of `Signer`s pub trait Signers { fn pubkeys(&self) -> Vec; fn try_pubkeys(&self) -> Result, SignerError>; diff --git a/sdk/src/stake_weighted_timestamp.rs b/sdk/src/stake_weighted_timestamp.rs index 620583c887..44478e2856 100644 --- a/sdk/src/stake_weighted_timestamp.rs +++ b/sdk/src/stake_weighted_timestamp.rs @@ -1,6 +1,7 @@ /// A helper for calculating a stake-weighted timestamp estimate from a set of timestamps and epoch /// stake. use solana_sdk::{ + arithmetic::SaturatingArithmetic, clock::{Slot, UnixTimestamp}, pubkey::Pubkey, }; @@ -40,29 +41,29 @@ where V: Borrow<(Slot, UnixTimestamp)>, { let mut stake_per_timestamp: BTreeMap = BTreeMap::new(); - let mut total_stake = 0; + let mut total_stake: u128 = 0; for (vote_pubkey, slot_timestamp) in unique_timestamps { let (timestamp_slot, timestamp) = slot_timestamp.borrow(); - let offset = slot.saturating_sub(*timestamp_slot) as u32 * slot_duration; - let estimate = timestamp + offset.as_secs() as i64; + let offset = slot_duration.sol_saturating_mul(slot.saturating_sub(*timestamp_slot) as u32); + let estimate = timestamp.saturating_add(offset.as_secs() as i64); let stake = stakes .get(vote_pubkey.borrow()) .map(|(stake, _account)| stake) .unwrap_or(&0); stake_per_timestamp .entry(estimate) - .and_modify(|stake_sum| *stake_sum += *stake as u128) + .and_modify(|stake_sum| *stake_sum = stake_sum.saturating_add(*stake as u128)) .or_insert(*stake as u128); - total_stake += *stake as u128; + total_stake = total_stake.saturating_add(*stake as u128); } if total_stake == 0 { return None; } - let mut stake_accumulator = 0; + let mut stake_accumulator: u128 = 0; let mut estimate = 0; // Populate `estimate` with stake-weighted median timestamp for (timestamp, stake) in 
stake_per_timestamp.into_iter() { - stake_accumulator += stake; + stake_accumulator = stake_accumulator.saturating_add(stake); if stake_accumulator > total_stake / 2 { estimate = timestamp; break; @@ -70,29 +71,33 @@ where } // Bound estimate by `max_allowable_drift` since the start of the epoch if let Some((epoch_start_slot, epoch_start_timestamp)) = epoch_start_timestamp { - let poh_estimate_offset = slot.saturating_sub(epoch_start_slot) as u32 * slot_duration; + let poh_estimate_offset = + slot_duration.sol_saturating_mul(slot.saturating_sub(epoch_start_slot) as u32); let estimate_offset = Duration::from_secs(if fix_estimate_into_u64 { (estimate as u64).saturating_sub(epoch_start_timestamp as u64) } else { estimate.saturating_sub(epoch_start_timestamp) as u64 }); - let max_allowable_drift_fast = poh_estimate_offset * max_allowable_drift.fast / 100; - let max_allowable_drift_slow = poh_estimate_offset * max_allowable_drift.slow / 100; + let max_allowable_drift_fast = + poh_estimate_offset.sol_saturating_mul(max_allowable_drift.fast) / 100; + let max_allowable_drift_slow = + poh_estimate_offset.sol_saturating_mul(max_allowable_drift.slow) / 100; if estimate_offset > poh_estimate_offset - && estimate_offset - poh_estimate_offset > max_allowable_drift_slow + && estimate_offset.sol_saturating_sub(poh_estimate_offset) > max_allowable_drift_slow { // estimate offset since the start of the epoch is higher than // `MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW` estimate = epoch_start_timestamp - + poh_estimate_offset.as_secs() as i64 - + max_allowable_drift_slow.as_secs() as i64; + .saturating_add(poh_estimate_offset.as_secs() as i64) + .saturating_add(max_allowable_drift_slow.as_secs() as i64); } else if estimate_offset < poh_estimate_offset - && poh_estimate_offset - estimate_offset > max_allowable_drift_fast + && poh_estimate_offset.sol_saturating_sub(estimate_offset) > max_allowable_drift_fast { // estimate offset since the start of the epoch is lower than // 
`MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST` - estimate = epoch_start_timestamp + poh_estimate_offset.as_secs() as i64 - - max_allowable_drift_fast.as_secs() as i64; + estimate = epoch_start_timestamp + .saturating_add(poh_estimate_offset.as_secs() as i64) + .saturating_sub(max_allowable_drift_fast.as_secs() as i64); } } Some(estimate) diff --git a/sdk/src/timing.rs b/sdk/src/timing.rs index 17625382ea..adb0259c27 100644 --- a/sdk/src/timing.rs +++ b/sdk/src/timing.rs @@ -1,3 +1,4 @@ +#![allow(clippy::integer_arithmetic)] //! The `timing` module provides std::time utility functions. use std::time::{Duration, SystemTime, UNIX_EPOCH}; diff --git a/sdk/src/transaction.rs b/sdk/src/transaction.rs index 3e2b778541..077e1b21cd 100644 --- a/sdk/src/transaction.rs +++ b/sdk/src/transaction.rs @@ -94,6 +94,10 @@ pub enum TransactionError { #[error("Transactions are currently disabled due to cluster maintenance")] ClusterMaintenance, + + /// Transaction processing left an account with an outstanding borrowed reference + #[error("Transaction processing left an account with an outstanding borrowed reference")] + AccountBorrowOutstanding, } pub type Result = result::Result; @@ -143,6 +147,11 @@ impl Transaction { Self::new_unsigned(message) } + /// Create a signed transaction with the given payer. + /// + /// # Panics + /// + /// Panics when signing fails. pub fn new_signed_with_payer( instructions: &[Instruction], payer: Option<&Pubkey>, @@ -153,6 +162,11 @@ impl Transaction { Self::new(signing_keypairs, message, recent_blockhash) } + /// Create a signed transaction. + /// + /// # Panics + /// + /// Panics when signing fails. pub fn new( from_keypairs: &T, message: Message, @@ -170,6 +184,10 @@ impl Transaction { /// * `recent_blockhash` - The PoH hash. /// * `program_ids` - The keys that identify programs used in the `instruction` vector. /// * `instructions` - Instructions that will be executed atomically. + /// + /// # Panics + /// + /// Panics when signing fails. 
pub fn new_with_compiled_instructions( from_keypairs: &T, keys: &[Pubkey], @@ -230,6 +248,10 @@ impl Transaction { } /// Check keys and keypair lengths, then sign this transaction. + /// + /// # Panics + /// + /// Panics when signing fails, use [`Transaction::try_sign`] to handle the error. pub fn sign(&mut self, keypairs: &T, recent_blockhash: Hash) { if let Err(e) = self.try_sign(keypairs, recent_blockhash) { panic!("Transaction::sign failed with error {:?}", e); @@ -239,6 +261,10 @@ impl Transaction { /// Sign using some subset of required keys /// if recent_blockhash is not the same as currently in the transaction, /// clear any prior signatures and update recent_blockhash + /// + /// # Panics + /// + /// Panics when signing fails, use [`Transaction::try_partial_sign`] to handle the error. pub fn partial_sign(&mut self, keypairs: &T, recent_blockhash: Hash) { if let Err(e) = self.try_partial_sign(keypairs, recent_blockhash) { panic!("Transaction::partial_sign failed with error {:?}", e); @@ -247,6 +273,10 @@ impl Transaction { /// Sign the transaction and place the signatures in their associated positions in `signatures` /// without checking that the positions are correct. + /// + /// # Panics + /// + /// Panics when signing fails, use [`Transaction::try_partial_sign_unchecked`] to handle the error. 
pub fn partial_sign_unchecked( &mut self, keypairs: &T, @@ -538,7 +568,7 @@ mod tests { let key = Keypair::new(); let id0 = Pubkey::default(); let program_id = solana_sdk::pubkey::new_rand(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( program_id, &0, vec![ @@ -615,7 +645,8 @@ mod tests { AccountMeta::new(keypair.pubkey(), true), AccountMeta::new(to, false), ]; - let instruction = Instruction::new(program_id, &(1u8, 2u8, 3u8), account_metas); + let instruction = + Instruction::new_with_bincode(program_id, &(1u8, 2u8, 3u8), account_metas); let message = Message::new(&[instruction], Some(&keypair.pubkey())); Transaction::new(&[&keypair], message, Hash::default()) } @@ -712,7 +743,7 @@ mod tests { fn test_partial_sign_mismatched_key() { let keypair = Keypair::new(); let fee_payer = solana_sdk::pubkey::new_rand(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( Pubkey::default(), &0, vec![AccountMeta::new(fee_payer, true)], @@ -726,7 +757,7 @@ mod tests { let keypair0 = Keypair::new(); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( Pubkey::default(), &0, vec![ @@ -756,7 +787,7 @@ mod tests { let program_id = Pubkey::default(); let keypair0 = Keypair::new(); let id0 = keypair0.pubkey(); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]); let message = Message::new(&[ix], Some(&id0)); Transaction::new_unsigned(message).sign(&Vec::<&Keypair>::new(), Hash::default()); } @@ -767,7 +798,8 @@ mod tests { let program_id = Pubkey::default(); let keypair0 = Keypair::new(); let wrong_id = Pubkey::default(); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(wrong_id, true)]); + let ix = + Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(wrong_id, true)]); let message = Message::new(&[ix], 
Some(&wrong_id)); Transaction::new_unsigned(message).sign(&[&keypair0], Hash::default()); } @@ -777,7 +809,7 @@ mod tests { let program_id = Pubkey::default(); let keypair0 = Keypair::new(); let id0 = keypair0.pubkey(); - let ix = Instruction::new(program_id, &0, vec![AccountMeta::new(id0, true)]); + let ix = Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, true)]); let message = Message::new(&[ix], Some(&id0)); let mut tx = Transaction::new_unsigned(message); tx.sign(&[&keypair0], Hash::default()); @@ -794,7 +826,7 @@ mod tests { let keypair0 = Keypair::new(); let id0 = keypair0.pubkey(); let id1 = solana_sdk::pubkey::new_rand(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( program_id, &0, vec![ @@ -822,7 +854,7 @@ mod tests { let presigner_keypair = Keypair::new(); let presigner_pubkey = presigner_keypair.pubkey(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( program_id, &0, vec![ @@ -845,7 +877,7 @@ mod tests { // Wrong key should error, not panic let another_pubkey = solana_sdk::pubkey::new_rand(); - let ix = Instruction::new( + let ix = Instruction::new_with_bincode( program_id, &0, vec![ diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index d74a161588..8e83c5d739 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-stake-accounts" description = "Blockchain, Rebuilt for Scale" authors = ["Solana Maintainers "] edition = "2018" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,15 +11,15 @@ documentation = "https://docs.rs/solana-stake-accounts" [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-remote-wallet = { path = 
"../remote-wallet", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } [dev-dependencies] -solana-runtime = { path = "../runtime", version = "=1.5.19" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/stake-accounts/src/arg_parser.rs b/stake-accounts/src/arg_parser.rs index 0fed87d115..ac481fbbb4 100644 --- a/stake-accounts/src/arg_parser.rs +++ b/stake-accounts/src/arg_parser.rs @@ -149,7 +149,7 @@ where .global(true) .takes_value(true) .value_name("URL") - .help("RPC entrypoint address. i.e. http://devnet.solana.com"), + .help("RPC entrypoint address. i.e. 
http://api.devnet.solana.com"), ) .subcommand( SubCommand::with_name("new") diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index 563d325292..c85294b735 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -280,7 +280,7 @@ mod tests { use super::*; use solana_runtime::{bank::Bank, bank_client::BankClient}; use solana_sdk::{ - account::Account, + account::AccountSharedData, client::SyncClient, genesis_config::create_genesis_config, signature::{Keypair, Signer}, @@ -308,9 +308,13 @@ mod tests { fee_payer_keypair } - fn get_account_at(client: &C, base_pubkey: &Pubkey, i: usize) -> Account { + fn get_account_at( + client: &C, + base_pubkey: &Pubkey, + i: usize, + ) -> AccountSharedData { let account_address = derive_stake_account_address(&base_pubkey, i); - client.get_account(&account_address).unwrap().unwrap() + AccountSharedData::from(client.get_account(&account_address).unwrap().unwrap()) } fn get_balances( @@ -334,7 +338,8 @@ mod tests { (0..num_accounts) .map(|i| { let address = derive_stake_account_address(&base_pubkey, i); - let account = client.get_account(&address).unwrap().unwrap(); + let account = + AccountSharedData::from(client.get_account(&address).unwrap().unwrap()); (address, StakeState::lockup_from(&account).unwrap()) }) .collect() diff --git a/stake-monitor/Cargo.toml b/stake-monitor/Cargo.toml index 2698dccc9d..a5019258a6 100644 --- a/stake-monitor/Cargo.toml +++ b/stake-monitor/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-stake-monitor" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,23 +13,22 @@ documentation = "https://docs.rs/solana-stake-monitor" clap = "2.33.1" console = "0.11.3" log = "0.4.11" -serde = "1.0.118" +serde = "1.0.122" serde_yaml = "0.8.13" 
-solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [dev-dependencies] serial_test = "0.4.0" -serial_test_derive = "0.4.0" -solana-local-cluster = { path = "../local-cluster", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } +solana-local-cluster = { path = "../local-cluster", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } tempfile = "3.1.0" [[bin]] diff --git a/stake-monitor/src/lib.rs b/stake-monitor/src/lib.rs index 693714829c..9c2ddd36df 100644 --- a/stake-monitor/src/lib.rs +++ b/stake-monitor/src/lib.rs @@ -358,7 +358,7 @@ pub fn process_slots(rpc_client: &RpcClient, accounts_info: &mut AccountsInfo, b #[cfg(test)] mod test { use super::*; - use serial_test_derive::serial; + use serial_test::serial; use solana_client::rpc_config::RpcSendTransactionConfig; use 
solana_core::{rpc::JsonRpcConfig, validator::ValidatorConfig}; use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster}; diff --git a/stake-o-matic/Cargo.toml b/stake-o-matic/Cargo.toml index 95e458f927..1d5893e3ab 100644 --- a/stake-o-matic/Cargo.toml +++ b/stake-o-matic/Cargo.toml @@ -7,22 +7,27 @@ documentation = "https://docs.rs/" license = "Apache-2.0" name = "solana-stake-o-matic" repository = "https://github.com/solana-labs/stake-o-matic" -version = "1.5.19" +version = "1.6.14" [dependencies] clap = "2.33.0" log = "0.4.11" +reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] } +semver = "0.9.0" +serde = { version = "1.0.122", features = ["derive"] } +serde_json = "1.0.62" serde_yaml = "0.8.13" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-cli-output = { path = "../cli-output", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-notifier = { path = "../notifier", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-cli-output = { path = "../cli-output", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-notifier = { path = "../notifier", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = 
"../programs/stake", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +thiserror = "1.0.21" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/stake-o-matic/src/confirmed_block_cache.rs b/stake-o-matic/src/confirmed_block_cache.rs new file mode 100644 index 0000000000..aed22fcca9 --- /dev/null +++ b/stake-o-matic/src/confirmed_block_cache.rs @@ -0,0 +1,248 @@ +use crate::retry_rpc_operation; +use log::*; +use serde::{Deserialize, Serialize}; +use solana_client::rpc_client::RpcClient; +use solana_sdk::{ + clock::{Slot, DEFAULT_SLOTS_PER_EPOCH}, + commitment_config::CommitmentConfig, + epoch_info::EpochInfo, +}; +use std::{ + cell::RefCell, + fs::{self, File, OpenOptions}, + io, + ops::Range, + path::{Path, PathBuf}, +}; + +#[derive(Clone, Debug, Default)] +struct Entry { + slots: Range, + path: PathBuf, +} + +impl Entry { + pub fn new>(base_path: P, slots: Range) -> Self { + let file_name = format!("{}-{}.json", slots.start, slots.end); + let path = base_path.as_ref().join(file_name); + Self { slots, path } + } + + fn parse_filename>(filename: F) -> Option> { + let filename = filename.as_ref(); + let slot_range = filename.file_stem(); + let extension = filename.extension(); + extension + .zip(slot_range) + .and_then(|(extension, slot_range)| { + if extension == "json" { + slot_range.to_str() + } else { + None + } + }) + .and_then(|slot_range| { + let mut parts = slot_range.splitn(2, '-'); + let start = parts.next().and_then(|p| p.parse::().ok()); + let end = parts.next().and_then(|p| p.parse::().ok()); + start.zip(end).map(|(start, end)| start..end) + }) + } + + pub fn from_pathbuf(path: PathBuf) -> Option { + path.file_name() + .and_then(|n| n.to_str()) + .and_then(Self::parse_filename) + .map(|slots| Self { slots, path }) + } + + pub fn path(&self) -> &Path { + &self.path + } +} + +const CACHE_VERSION: u64 = 0; +const DEFAULT_SLOTS_PER_ENTRY: u64 = 2500; +const 
DEFAULT_MAX_CACHED_SLOTS: u64 = 5 * DEFAULT_SLOTS_PER_EPOCH; +const CONFIG_FILENAME: &str = "config.yaml"; + +#[derive(Debug, Deserialize, Serialize)] +struct Config { + version: u64, + slots_per_chunk: u64, + max_cached_slots: u64, +} + +impl Default for Config { + fn default() -> Self { + Self { + version: CACHE_VERSION, + slots_per_chunk: DEFAULT_SLOTS_PER_ENTRY, + max_cached_slots: DEFAULT_MAX_CACHED_SLOTS, + } + } +} + +pub struct ConfirmedBlockCache { + rpc_client: RpcClient, + base_path: PathBuf, + entries: RefCell>, + config: Config, +} + +impl ConfirmedBlockCache { + fn store_config>(config_path: P, config: &Config) -> io::Result<()> { + let config_path = config_path.as_ref(); + let file = File::create(config_path)?; + serde_yaml::to_writer(file, config).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!( + "error: cannot store config `{}`: {:?}", + config_path.to_string_lossy(), + e, + ), + ) + }) + } + + fn load_config>(config_path: P) -> io::Result { + let config_path = config_path.as_ref(); + let file = File::open(config_path)?; + serde_yaml::from_reader(file).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!( + "error: cannot load config `{}`: {:?}", + config_path.to_string_lossy(), + e, + ), + ) + }) + } + + pub fn open, U: AsRef>(path: P, rpc_url: U) -> io::Result { + let path = path.as_ref(); + let config_path = path.join(CONFIG_FILENAME); + let rpc_url = rpc_url.as_ref(); + let rpc_client = RpcClient::new(rpc_url.to_string()); + let (config, entries) = match fs::read_dir(path) { + Ok(dir_entries) => { + let config = Self::load_config(&config_path)?; + if config.version != CACHE_VERSION { + return Err(io::Error::new( + io::ErrorKind::Other, + "unexpected cache version", + )); + } + let current_slot = rpc_client + .get_slot() + .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e)))?; + let eviction_slot = current_slot.saturating_sub(config.max_cached_slots); + let (delete, mut entries) = dir_entries + 
.filter_map(|de| Entry::from_pathbuf(de.unwrap().path())) + .fold( + (Vec::new(), Vec::new()), + |(mut delete, mut entries), entry| { + if entry.slots.end < eviction_slot { + delete.push(entry); + } else { + entries.push(entry); + } + (delete, entries) + }, + ); + let mut evicted_ranges = Vec::new(); + for d in &delete { + match std::fs::remove_file(&d.path) { + Ok(()) => evicted_ranges.push(format!("{:?}", d.slots)), + Err(e) => warn!("entry eviction for slots {:?} failed: {}", d.slots, e), + } + } + debug!("entries evicted for slots: {}", evicted_ranges.join(", ")); + entries.sort_by(|l, r| l.slots.start.cmp(&r.slots.start)); + Ok((config, entries)) + } + Err(err) => { + if err.kind() == io::ErrorKind::NotFound { + let config = Config::default(); + fs::create_dir_all(path)?; + Self::store_config(config_path, &config)?; + Ok((config, Vec::new())) + } else { + Err(err) + } + } + }?; + Ok(Self { + rpc_client, + base_path: path.to_path_buf(), + entries: RefCell::new(entries), + config, + }) + } + + fn lookup(&self, start: Slot) -> Option { + let entries = self.entries.borrow(); + for i in entries.iter() { + if i.slots.start == start { + debug!("HIT: {}", start); + return Some(i.clone()); + } + } + debug!("MISS: {}", start); + None + } + + fn fetch(&self, start: Slot, end: Slot, epoch_info: &EpochInfo) -> io::Result> { + debug!("fetching slot range: {}..{}", start, end); + // Fingers crossed we hit the same RPC backend... 
+ let slots = retry_rpc_operation(42, || { + self.rpc_client.get_confirmed_blocks(start, Some(end)) + }) + .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{:?}", e)))?; + + // Only cache complete chunks + if end + self.config.slots_per_chunk < epoch_info.absolute_slot { + debug!("committing entry for slots {}..{}", start, end); + let entry = Entry::new(&self.base_path, start..end); + let file = OpenOptions::new() + .write(true) + .create_new(true) + .open(entry.path())?; + serde_json::to_writer(file, &slots)?; + + self.entries.borrow_mut().push(entry); + } + + Ok(slots) + } + + pub fn query(&self, start: Slot, end: Slot) -> io::Result> { + let chunk_size = self.config.slots_per_chunk; + let mut chunk_start = (start / chunk_size) * chunk_size; + let mut slots = Vec::new(); + let epoch_info = self + .rpc_client + .get_epoch_info_with_commitment(CommitmentConfig::finalized()) + .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{:?}", e)))?; + let last_slot = end.min(epoch_info.absolute_slot); + while chunk_start < last_slot { + let mut chunk_slots = if let Some(entry) = self.lookup(chunk_start) { + let file = File::open(entry.path())?; + serde_json::from_reader(file)? + } else { + let chunk_end = chunk_start + chunk_size - 1; + self.fetch(chunk_start, chunk_end, &epoch_info)? + }; + slots.append(&mut chunk_slots); + chunk_start += chunk_size; + } + let slots = slots + .drain(..) 
+ .skip_while(|s| *s < start) + .take_while(|s| *s <= end) + .collect::>(); + Ok(slots) + } +} diff --git a/stake-o-matic/src/main.rs b/stake-o-matic/src/main.rs index 9602499863..6a89d9ca7c 100644 --- a/stake-o-matic/src/main.rs +++ b/stake-o-matic/src/main.rs @@ -1,41 +1,168 @@ #![allow(clippy::integer_arithmetic)] -use clap::{crate_description, crate_name, crate_version, value_t, value_t_or_exit, App, Arg}; -use log::*; -use solana_clap_utils::{ - input_parsers::{keypair_of, pubkey_of}, - input_validators::{is_amount, is_keypair, is_pubkey_or_keypair, is_url, is_valid_percentage}, -}; -use solana_cli_output::display::format_labeled_address; -use solana_client::{ - client_error, rpc_client::RpcClient, rpc_config::RpcSimulateTransactionConfig, - rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, rpc_response::RpcVoteAccountInfo, -}; -use solana_metrics::datapoint_info; -use solana_notifier::Notifier; -use solana_sdk::{ - account_utils::StateMut, - clock::{Epoch, Slot}, - commitment_config::CommitmentConfig, - message::Message, - native_token::*, - pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, - transaction::Transaction, -}; -use solana_stake_program::{stake_instruction, stake_state::StakeState}; - -use std::{ - collections::{HashMap, HashSet}, - error, - fs::File, - path::PathBuf, - process, - str::FromStr, - thread::sleep, - time::Duration, +use { + clap::{ + crate_description, crate_name, crate_version, value_t, value_t_or_exit, App, Arg, + ArgMatches, + }, + log::*, + reqwest::StatusCode, + solana_clap_utils::{ + input_parsers::{keypair_of, pubkey_of}, + input_validators::{ + is_amount, is_keypair, is_pubkey_or_keypair, is_url, is_valid_percentage, + }, + }, + solana_cli_output::display::format_labeled_address, + solana_client::{ + client_error, rpc_client::RpcClient, rpc_config::RpcSimulateTransactionConfig, + rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, rpc_response::RpcVoteAccountInfo, + }, + solana_metrics::datapoint_info, + 
solana_notifier::Notifier, + solana_sdk::{ + account_utils::StateMut, + clock::{Epoch, Slot}, + commitment_config::CommitmentConfig, + message::Message, + native_token::*, + pubkey::Pubkey, + signature::{Keypair, Signature, Signer}, + transaction::Transaction, + }, + solana_stake_program::{stake_instruction, stake_state::StakeState}, + std::{ + collections::{HashMap, HashSet}, + error, + fs::File, + path::PathBuf, + process, + str::FromStr, + thread::sleep, + time::Duration, + }, + thiserror::Error, }; +mod confirmed_block_cache; mod validator_list; +mod validators_app; + +use confirmed_block_cache::ConfirmedBlockCache; + +enum InfrastructureConcentrationAffectKind { + Destake(String), + Warn(String), +} + +#[derive(Debug)] +enum InfrastructureConcentrationAffects { + WarnAll, + DestakeListed(HashSet), + DestakeAll, +} + +impl InfrastructureConcentrationAffects { + fn destake_memo(validator_id: &Pubkey, concentration: f64, config: &Config) -> String { + format!( + "🏟️ `{}` infrastructure concentration {:.1}% is too high. Max concentration is {:.0}%. Removed ◎{}", + validator_id, + concentration, + config.max_infrastructure_concentration, + lamports_to_sol(config.baseline_stake_amount), + ) + } + fn warning_memo(validator_id: &Pubkey, concentration: f64, config: &Config) -> String { + format!( + "🗺 `{}` infrastructure concentration {:.1}% is too high. Max concentration is {:.0}%. No stake removed. 
Consider finding a new data center", + validator_id, + concentration, + config.max_infrastructure_concentration, + ) + } + pub fn memo( + &self, + validator_id: &Pubkey, + concentration: f64, + config: &Config, + ) -> InfrastructureConcentrationAffectKind { + match self { + Self::DestakeAll => InfrastructureConcentrationAffectKind::Destake(Self::destake_memo( + validator_id, + concentration, + config, + )), + Self::WarnAll => InfrastructureConcentrationAffectKind::Warn(Self::warning_memo( + validator_id, + concentration, + config, + )), + Self::DestakeListed(ref list) => { + if list.contains(validator_id) { + InfrastructureConcentrationAffectKind::Destake(Self::destake_memo( + validator_id, + concentration, + config, + )) + } else { + InfrastructureConcentrationAffectKind::Warn(Self::warning_memo( + validator_id, + concentration, + config, + )) + } + } + } + } +} + +#[derive(Debug, Error)] +#[error("cannot convert to InfrastructureConcentrationAffects: {0}")] +struct InfrastructureConcentrationAffectsFromStrError(String); + +impl FromStr for InfrastructureConcentrationAffects { + type Err = InfrastructureConcentrationAffectsFromStrError; + fn from_str(s: &str) -> Result { + let lower = s.to_ascii_lowercase(); + match lower.as_str() { + "warn" => Ok(Self::WarnAll), + "destake" => Ok(Self::DestakeAll), + _ => { + let file = File::open(s) + .map_err(|_| InfrastructureConcentrationAffectsFromStrError(s.to_string()))?; + let mut list: Vec = serde_yaml::from_reader(file) + .map_err(|_| InfrastructureConcentrationAffectsFromStrError(s.to_string()))?; + let list = list + .drain(..) 
+ .filter_map(|ref s| Pubkey::from_str(s).ok()) + .collect::>(); + Ok(Self::DestakeListed(list)) + } + } + } +} + +pub fn is_release_version(string: String) -> Result<(), String> { + if string.starts_with('v') && semver::Version::parse(string.split_at(1).1).is_ok() { + return Ok(()); + } + semver::Version::parse(&string) + .map(|_| ()) + .map_err(|err| format!("{:?}", err)) +} + +pub fn release_version_of(matches: &ArgMatches<'_>, name: &str) -> Option { + matches + .value_of(name) + .map(ToString::to_string) + .map(|string| { + if string.starts_with('v') { + semver::Version::parse(string.split_at(1).1) + } else { + semver::Version::parse(&string) + } + .expect("semver::Version") + }) +} #[derive(Debug)] struct Config { @@ -55,8 +182,8 @@ struct Config { /// Amount of additional lamports to stake quality block producers in the validator_list bonus_stake_amount: u64, - /// Quality validators produce a block at least this percentage of their leader slots over the - /// previous epoch + /// Quality validators produce within this percentage of the cluster average skip rate over + /// the previous epoch quality_block_producer_percentage: usize, /// A delinquent validator gets this number of slots of grace (from the current slot) before it @@ -64,13 +191,81 @@ struct Config { /// cause a validator to go down delinquent_grace_slot_distance: u64, - /// Don't ever unstake more than this percentage of the cluster at one time - max_poor_block_productor_percentage: usize, + /// Don't ever unstake more than this percentage of the cluster at one time for poor block + /// production + max_poor_block_producer_percentage: usize, + + /// Vote accounts with a larger commission than this amount will not be staked. 
+ max_commission: u8, address_labels: HashMap, + + /// If Some(), destake validators with a version less than this version subject to the + /// `max_old_release_version_percentage` limit + min_release_version: Option, + + /// Don't ever unstake more than this percentage of the cluster at one time for running an + /// older software version + max_old_release_version_percentage: usize, + + /// Base path of confirmed block cache + confirmed_block_cache_path: PathBuf, + + /// Vote accounts sharing infrastructure with larger than this amount will not be staked + max_infrastructure_concentration: f64, + + /// How validators with infrastruction concentration above `max_infrastructure_concentration` + /// will be affected. Accepted values are: + /// 1) "warn" - Stake unaffected. A warning message is notified + /// 2) "destake" - Removes all validator stake + /// 3) PATH_TO_YAML - Reads a list of validator identity pubkeys from the specified YAML file + /// destaking those in the list and warning any others + infrastructure_concentration_affects: InfrastructureConcentrationAffects, + + /// Use a cluster-average skip rate floor for block-production quality calculations + use_cluster_average_skip_rate: bool, + + bad_cluster_average_skip_rate: usize, +} + +impl Config { + #[cfg(test)] + pub fn default_for_test() -> Self { + Self { + json_rpc_url: "https://api.mainnet-beta.com".to_string(), + cluster: "mainnet-beta".to_string(), + source_stake_address: Pubkey::new_unique(), + authorized_staker: Keypair::new(), + validator_list: HashSet::default(), + dry_run: true, + baseline_stake_amount: 25_000, + bonus_stake_amount: 175_000, + quality_block_producer_percentage: 15, + delinquent_grace_slot_distance: 21_600, + max_poor_block_producer_percentage: 20, + max_commission: 100, + address_labels: HashMap::default(), + min_release_version: None, + max_old_release_version_percentage: 10, + confirmed_block_cache_path: default_confirmed_block_cache_path(), + 
max_infrastructure_concentration: 100.0, + infrastructure_concentration_affects: InfrastructureConcentrationAffects::WarnAll, + use_cluster_average_skip_rate: false, + bad_cluster_average_skip_rate: 50, + } + } +} + +fn default_confirmed_block_cache_path() -> PathBuf { + let home_dir = std::env::var("HOME").unwrap(); + PathBuf::from(home_dir).join(".cache/solana/som/confirmed-block-cache/") } fn get_config() -> Config { + let default_confirmed_block_cache_path = default_confirmed_block_cache_path() + .to_str() + .unwrap() + .to_string(); let matches = App::new(crate_name!()) .about(crate_description!()) .version(crate_version!()) @@ -95,7 +290,6 @@ fn get_config() -> Config { .takes_value(true) .validator(is_url) .help("JSON RPC URL for the cluster") - .conflicts_with("cluster") ) .arg( Arg::with_name("cluster") @@ -136,20 +330,39 @@ fn get_config() -> Config { .validator(is_keypair) .required(true) .takes_value(true) + .help("Keypair of the authorized staker for the source stake account.") ) .arg( Arg::with_name("quality_block_producer_percentage") .long("quality-block-producer-percentage") .value_name("PERCENTAGE") .takes_value(true) - .default_value("75") + .default_value("15") .validator(is_valid_percentage) - .help("Quality validators produce a block in at least this percentage of their leader slots over the previous epoch") + .help("Quality validators have a skip rate within this percentage of the cluster average in the previous epoch.") + ) + .arg( + Arg::with_name("bad_cluster_average_skip_rate") + .long("bad-cluster-average-skip-rate") + .value_name("PERCENTAGE") + .takes_value(true) + .default_value("50") + .validator(is_valid_percentage) + .help("Threshold to notify for a poor average cluster skip rate.") + ) + .arg( + Arg::with_name("max_poor_block_producer_percentage") + .long("max-poor-block-producer-percentage") + .value_name("PERCENTAGE") + .takes_value(true) + .default_value("20") + .validator(is_valid_percentage) + .help("Do not add or remove 
bonus stake from any non-delinquent validators if at least this percentage of all validators are poor block producers") ) .arg( Arg::with_name("baseline_stake_amount") .long("baseline-stake-amount") - .value_name("VLX") + .value_name("SOL") .takes_value(true) .default_value("5000") .validator(is_amount) @@ -157,11 +370,84 @@ fn get_config() -> Config { .arg( Arg::with_name("bonus_stake_amount") .long("bonus-stake-amount") - .value_name("VLX") + .value_name("SOL") .takes_value(true) .default_value("50000") .validator(is_amount) ) + .arg( + Arg::with_name("max_commission") + .long("max-commission") + .value_name("PERCENTAGE") + .takes_value(true) + .default_value("100") + .validator(is_valid_percentage) + .help("Vote accounts with a larger commission than this amount will not be staked") + ) + .arg( + Arg::with_name("min_release_version") + .long("min-release-version") + .value_name("SEMVER") + .takes_value(true) + .validator(is_release_version) + .help("Remove the base and bonus stake from validators with \ + a release version older than this one") + ) + .arg( + Arg::with_name("max_old_release_version_percentage") + .long("max-old-release-version-percentage") + .value_name("PERCENTAGE") + .takes_value(true) + .default_value("10") + .validator(is_valid_percentage) + .help("Do not remove stake from validators running older \ + software versions if more than this percentage of \ + all validators are running an older software version") + ) + .arg( + Arg::with_name("confirmed_block_cache_path") + .long("confirmed-block-cache-path") + .takes_value(true) + .value_name("PATH") + .default_value(&default_confirmed_block_cache_path) + .help("Base path of confirmed block cache") + ) + .arg( + Arg::with_name("max_infrastructure_concentration") + .long("max-infrastructure-concentration") + .takes_value(true) + .value_name("PERCENTAGE") + .default_value("100") + .validator(is_valid_percentage) + .help("Vote accounts sharing infrastructure with larger than this amount will not be 
staked") + ) + .arg( + Arg::with_name("infrastructure_concentration_affects") + .long("infrastructure-concentration-affects") + .takes_value(true) + .value_name("AFFECTS") + .default_value("warn") + .validator(|ref s| { + InfrastructureConcentrationAffects::from_str(s) + .map(|_| ()) + .map_err(|e| format!("{}", e)) + }) + .help("How validators with infrastruction concentration above \ + `max_infrastructure_concentration` will be affected. \ + Accepted values are: \ + 1) warn - Stake unaffected. A warning message \ + is notified \ + 2) destake - Removes all validator stake \ + 3) PATH_TO_YAML - Reads a list of validator identity \ + pubkeys from the specified YAML file \ + destaking those in the list and warning \ + any others") + ) + .arg( + Arg::with_name("use_cluster_average_skip_rate") + .long("use-cluster-average-skip-rate") + .help("Use a cluster-average skip rate floor for block-production quality calculations") + ) .get_matches(); let config = if let Some(config_file) = matches.value_of("config_file") { @@ -176,17 +462,25 @@ fn get_config() -> Config { let cluster = value_t!(matches, "cluster", String).unwrap_or_else(|_| "unknown".into()); let quality_block_producer_percentage = value_t_or_exit!(matches, "quality_block_producer_percentage", usize); + let max_commission = value_t_or_exit!(matches, "max_commission", u8); + let max_poor_block_producer_percentage = + value_t_or_exit!(matches, "max_poor_block_producer_percentage", usize); + let max_old_release_version_percentage = + value_t_or_exit!(matches, "max_old_release_version_percentage", usize); let baseline_stake_amount = sol_to_lamports(value_t_or_exit!(matches, "baseline_stake_amount", f64)); let bonus_stake_amount = sol_to_lamports(value_t_or_exit!(matches, "bonus_stake_amount", f64)); + let min_release_version = release_version_of(&matches, "min_release_version"); let (json_rpc_url, validator_list) = match cluster.as_str() { "mainnet-beta" => ( - "http://api.mainnet-beta.solana.com".into(), + 
value_t!(matches, "json_rpc_url", String) + .unwrap_or_else(|_| "http://api.mainnet-beta.solana.com".into()), validator_list::mainnet_beta_validators(), ), "testnet" => ( - "http://testnet.solana.com".into(), + value_t!(matches, "json_rpc_url", String) + .unwrap_or_else(|_| "http://testnet.solana.com".into()), validator_list::testnet_validators(), ), "unknown" => { @@ -219,6 +513,22 @@ fn get_config() -> Config { _ => unreachable!(), }; let validator_list = validator_list.into_iter().collect::>(); + let confirmed_block_cache_path = matches + .value_of("confirmed_block_cache_path") + .map(PathBuf::from) + .unwrap(); + + let bad_cluster_average_skip_rate = + value_t!(matches, "bad_cluster_average_skip_rate", usize).unwrap_or(50); + let max_infrastructure_concentration = + value_t!(matches, "max_infrastructure_concentration", f64).unwrap(); + let infrastructure_concentration_affects = value_t!( + matches, + "infrastructure_concentration_affects", + InfrastructureConcentrationAffects + ) + .unwrap(); + let use_cluster_average_skip_rate = matches.is_present("use_cluster_average_skip_rate"); let config = Config { json_rpc_url, @@ -231,8 +541,16 @@ fn get_config() -> Config { bonus_stake_amount, delinquent_grace_slot_distance: 21600, // ~24 hours worth of slots at 2.5 slots per second quality_block_producer_percentage, - max_poor_block_productor_percentage: 20, + max_commission, + max_poor_block_producer_percentage, address_labels: config.address_labels, + min_release_version, + max_old_release_version_percentage, + confirmed_block_cache_path, + max_infrastructure_concentration, + infrastructure_concentration_affects, + use_cluster_average_skip_rate, + bad_cluster_average_skip_rate, }; info!("RPC URL: {}", config.json_rpc_url); @@ -270,72 +588,164 @@ fn get_stake_account( .map(|stake_state| (account.lamports, stake_state)) } -/// Split validators into quality/poor lists based on their block production over the given `epoch` -fn classify_block_producers( - rpc_client: 
&RpcClient, - config: &Config, - epoch: Epoch, -) -> Result<(HashSet, HashSet), Box> { - let epoch_schedule = rpc_client.get_epoch_schedule()?; - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); - let last_slot_in_epoch = epoch_schedule.get_last_slot_in_epoch(epoch); +pub fn retry_rpc_operation(mut retries: usize, op: F) -> client_error::Result +where + F: Fn() -> client_error::Result, +{ + loop { + let result = op(); - let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; - if minimum_ledger_slot >= last_slot_in_epoch { - return Err(format!( - "Minimum ledger slot is newer than the last epoch: {} > {}", - minimum_ledger_slot, last_slot_in_epoch - ) - .into()); + if let Err(client_error::ClientError { + kind: client_error::ClientErrorKind::Reqwest(ref reqwest_error), + .. + }) = result + { + let can_retry = reqwest_error.is_timeout() + || reqwest_error + .status() + .map(|s| s == StatusCode::BAD_GATEWAY || s == StatusCode::GATEWAY_TIMEOUT) + .unwrap_or(false); + if can_retry && retries > 0 { + info!("RPC request timeout, {} retries remaining", retries); + retries -= 1; + continue; + } + } + return result; } +} - let first_slot = if minimum_ledger_slot > first_slot_in_epoch { - minimum_ledger_slot - } else { - first_slot_in_epoch - }; +type BoxResult = Result>; - let confirmed_blocks = rpc_client.get_confirmed_blocks(first_slot, Some(last_slot_in_epoch))?; - let confirmed_blocks: HashSet = confirmed_blocks.into_iter().collect(); +/// quality poor cluster_skip_rate, too_many_poor_block_producers +type ClassifyResult = (HashSet, HashSet, usize, bool); +fn classify_producers( + first_slot: Slot, + first_slot_in_epoch: Slot, + confirmed_blocks: HashSet, + leader_schedule: HashMap>, + config: &Config, +) -> BoxResult { let mut poor_block_producers = HashSet::new(); let mut quality_block_producers = HashSet::new(); + let mut blocks_and_slots = HashMap::new(); - let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot))?.unwrap(); 
+ let mut total_blocks = 0; + let mut total_slots = 0; for (validator_identity, relative_slots) in leader_schedule { let mut validator_blocks = 0; let mut validator_slots = 0; for relative_slot in relative_slots { let slot = first_slot_in_epoch + relative_slot as Slot; if slot >= first_slot { + total_slots += 1; validator_slots += 1; if confirmed_blocks.contains(&slot) { + total_blocks += 1; validator_blocks += 1; } } } - trace!( - "Validator {} produced {} blocks in {} slots", - validator_identity, - validator_blocks, - validator_slots - ); if validator_slots > 0 { let validator_identity = Pubkey::from_str(&validator_identity)?; - if validator_blocks * 100 / validator_slots >= config.quality_block_producer_percentage - { - quality_block_producers.insert(validator_identity); - } else { - poor_block_producers.insert(validator_identity); - } + let e = blocks_and_slots.entry(validator_identity).or_insert((0, 0)); + e.0 += validator_blocks; + e.1 += validator_slots; } } + let cluster_average_rate = 100 - total_blocks * 100 / total_slots; + for (validator_identity, (blocks, slots)) in blocks_and_slots { + let skip_rate: usize = 100 - (blocks * 100 / slots); + let skip_rate_floor = if config.use_cluster_average_skip_rate { + cluster_average_rate + } else { + 0 + }; + if skip_rate.saturating_sub(config.quality_block_producer_percentage) >= skip_rate_floor { + poor_block_producers.insert(validator_identity); + } else { + quality_block_producers.insert(validator_identity); + } + trace!( + "Validator {} produced {} blocks in {} slots skip_rate: {}", + validator_identity, + blocks, + slots, + skip_rate, + ); + } + let poor_block_producer_percentage = poor_block_producers.len() * 100 + / (quality_block_producers.len() + poor_block_producers.len()); + let too_many_poor_block_producers = + poor_block_producer_percentage > config.max_poor_block_producer_percentage; + + info!("cluster_average_skip_rate: {}", cluster_average_rate); info!("quality_block_producers: {}", 
quality_block_producers.len()); trace!("quality_block_producers: {:?}", quality_block_producers); info!("poor_block_producers: {}", poor_block_producers.len()); trace!("poor_block_producers: {:?}", poor_block_producers); - Ok((quality_block_producers, poor_block_producers)) + info!( + "poor_block_producer_percentage: {}% (too many poor producers={})", + poor_block_producer_percentage, too_many_poor_block_producers, + ); + + Ok(( + quality_block_producers, + poor_block_producers, + cluster_average_rate, + too_many_poor_block_producers, + )) +} + +/// Split validators into quality/poor lists based on their block production over the given `epoch` +fn classify_block_producers( + rpc_client: &RpcClient, + config: &Config, + epoch: Epoch, +) -> BoxResult { + let epoch_schedule = rpc_client.get_epoch_schedule()?; + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch); + let last_slot_in_epoch = epoch_schedule.get_last_slot_in_epoch(epoch); + + let first_available_block = rpc_client.get_first_available_block()?; + let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?; + debug!( + "first_available_block: {}, minimum_ledger_slot: {}", + first_available_block, minimum_ledger_slot + ); + + if first_available_block >= last_slot_in_epoch { + return Err(format!( + "First available block is newer than the last epoch: {} > {}", + first_available_block, last_slot_in_epoch + ) + .into()); + } + + let first_slot = if first_available_block > first_slot_in_epoch { + first_available_block + } else { + first_slot_in_epoch + }; + + let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot))?.unwrap(); + + let cache_path = config.confirmed_block_cache_path.join(&config.cluster); + let cbc = ConfirmedBlockCache::open(cache_path, &config.json_rpc_url).unwrap(); + let confirmed_blocks = cbc + .query(first_slot, last_slot_in_epoch)? 
+ .into_iter() + .collect::>(); + + classify_producers( + first_slot, + first_slot_in_epoch, + confirmed_blocks, + leader_schedule, + config, + ) } fn validate_source_stake_account( @@ -347,7 +757,7 @@ fn validate_source_stake_account( get_stake_account(&rpc_client, &config.source_stake_address)?; info!( - "stake account balance: {} VLX", + "stake account balance: {} SOL", lamports_to_sol(source_stake_balance) ); match &source_stake_state { @@ -383,16 +793,11 @@ fn simulate_transactions( rpc_client: &RpcClient, candidate_transactions: Vec<(Transaction, String)>, ) -> client_error::Result> { - let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?; - - info!( - "Simulating {} transactions with blockhash {}", - candidate_transactions.len(), - blockhash - ); + info!("Simulating {} transactions", candidate_transactions.len(),); let mut simulated_transactions = vec![]; for (mut transaction, memo) in candidate_transactions { - transaction.message.recent_blockhash = blockhash; + transaction.message.recent_blockhash = + retry_rpc_operation(10, || rpc_client.get_recent_blockhash())?.0; let sim_result = rpc_client.simulate_transaction_with_config( &transaction, @@ -426,19 +831,19 @@ fn transact( ) -> Result, Box> { let authorized_staker_balance = rpc_client.get_balance(&authorized_staker.pubkey())?; info!( - "Authorized staker balance: {} VLX", + "Authorized staker balance: {} SOL", lamports_to_sol(authorized_staker_balance) ); let (blockhash, fee_calculator, last_valid_slot) = rpc_client - .get_recent_blockhash_with_commitment(CommitmentConfig::finalized())? + .get_recent_blockhash_with_commitment(rpc_client.commitment())? 
.value; info!("{} transactions to send", transactions.len()); let required_fee = transactions.iter().fold(0, |fee, (transaction, _)| { fee + fee_calculator.calculate_fee(&transaction.message) }); - info!("Required fee: {} VLX", lamports_to_sol(required_fee)); + info!("Required fee: {} SOL", lamports_to_sol(required_fee)); if required_fee > authorized_staker_balance { return Err("Authorized staker has insufficient funds".into()); } @@ -558,6 +963,131 @@ fn process_confirmations( ok } +const DATA_CENTER_ID_UNKNOWN: &str = "0-Unknown"; + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +struct DataCenterId { + asn: u64, + location: String, +} + +impl Default for DataCenterId { + fn default() -> Self { + Self::from_str(DATA_CENTER_ID_UNKNOWN).unwrap() + } +} + +impl std::str::FromStr for DataCenterId { + type Err = String; + fn from_str(s: &str) -> Result { + let mut parts = s.splitn(2, '-'); + let asn = parts.next(); + let location = parts.next(); + if let (Some(asn), Some(location)) = (asn, location) { + let asn = asn.parse().map_err(|e| format!("{:?}", e))?; + let location = location.to_string(); + Ok(Self { asn, location }) + } else { + Err(format!("cannot construct DataCenterId from input: {}", s)) + } + } +} + +impl std::fmt::Display for DataCenterId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}-{}", self.asn, self.location) + } +} + +#[derive(Clone, Debug, Default)] +struct DatacenterInfo { + id: DataCenterId, + stake: u64, + stake_percent: f64, + validators: Vec, +} + +impl DatacenterInfo { + pub fn new(id: DataCenterId) -> Self { + Self { + id, + ..Self::default() + } + } +} + +impl std::fmt::Display for DatacenterInfo { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "{:<30} {:>20} {:>5.2} {}", + self.id.to_string(), + self.stake, + self.stake_percent, + self.validators.len() + ) + } +} + +fn get_data_center_info() -> Result, Box> { + let token = std::env::var("VALIDATORS_APP_TOKEN")?; + 
let client = validators_app::Client::new(token); + let validators = client.validators(None, None)?; + let mut data_center_infos = HashMap::new(); + let mut total_stake = 0; + let mut unknown_data_center_stake: u64 = 0; + for v in validators.as_ref() { + let account = v + .account + .as_ref() + .and_then(|pubkey| Pubkey::from_str(pubkey).ok()); + let account = if let Some(account) = account { + account + } else { + warn!("No vote pubkey for: {:?}", v); + continue; + }; + + let stake = v.active_stake.unwrap_or(0); + + let data_center = v + .data_center_key + .as_deref() + .or_else(|| { + unknown_data_center_stake = unknown_data_center_stake.saturating_add(stake); + None + }) + .unwrap_or(DATA_CENTER_ID_UNKNOWN); + let data_center_id = DataCenterId::from_str(data_center) + .map_err(|e| { + unknown_data_center_stake = unknown_data_center_stake.saturating_add(stake); + e + }) + .unwrap_or_default(); + + let mut data_center_info = data_center_infos + .entry(data_center_id.clone()) + .or_insert_with(|| DatacenterInfo::new(data_center_id)); + data_center_info.stake += stake; + total_stake += stake; + data_center_info.validators.push(account); + } + + let unknown_percent = 100f64 * (unknown_data_center_stake as f64) / total_stake as f64; + if unknown_percent > 3f64 { + warn!("unknown data center percentage: {:.0}%", unknown_percent); + } + + let data_center_infos = data_center_infos + .drain() + .map(|(_, mut i)| { + i.stake_percent = 100f64 * i.stake as f64 / total_stake as f64; + i + }) + .collect(); + Ok(data_center_infos) +} + #[allow(clippy::cognitive_complexity)] // Yeah I know... 
fn main() -> Result<(), Box> { solana_logger::setup_with_default("solana=info"); @@ -566,6 +1096,46 @@ fn main() -> Result<(), Box> { let notifier = Notifier::default(); let rpc_client = RpcClient::new(config.json_rpc_url.clone()); + if !config.dry_run && notifier.is_empty() { + error!("A notifier must be active with --confirm"); + process::exit(1); + } + + // Sanity check that the RPC endpoint is healthy before performing too much work + rpc_client.get_health().unwrap_or_else(|err| { + error!("RPC endpoint is unhealthy: {:?}", err); + process::exit(1); + }); + + let cluster_nodes_with_old_version: HashSet = match config.min_release_version { + Some(ref min_release_version) => rpc_client + .get_cluster_nodes()? + .into_iter() + .filter_map(|rpc_contact_info| { + if let Ok(pubkey) = Pubkey::from_str(&rpc_contact_info.pubkey) { + if config.validator_list.contains(&pubkey) { + if let Some(ref version) = rpc_contact_info.version { + if let Ok(semver) = semver::Version::parse(version) { + if semver < *min_release_version { + return Some(rpc_contact_info.pubkey); + } + } + } + } + } + None + }) + .collect(), + None => HashSet::default(), + }; + + if let Some(ref min_release_version) = config.min_release_version { + info!( + "Validators running a release older than {}: {:?}", + min_release_version, cluster_nodes_with_old_version, + ); + } + let source_stake_balance = validate_source_stake_account(&rpc_client, &config)?; let epoch_info = rpc_client.get_epoch_info()?; @@ -573,11 +1143,17 @@ fn main() -> Result<(), Box> { info!("Epoch info: {:?}", epoch_info); - let (quality_block_producers, poor_block_producers) = - classify_block_producers(&rpc_client, &config, last_epoch)?; + let ( + quality_block_producers, + poor_block_producers, + cluster_average_skip_rate, + too_many_poor_block_producers, + ) = classify_block_producers(&rpc_client, &config, last_epoch)?; - let too_many_poor_block_producers = poor_block_producers.len() - > quality_block_producers.len() * 
config.max_poor_block_productor_percentage / 100; + let too_many_old_validators = cluster_nodes_with_old_version.len() + > (poor_block_producers.len() + quality_block_producers.len()) + * config.max_old_release_version_percentage + / 100; // Fetch vote account status for all the validator_listed validators let vote_account_status = rpc_client.get_vote_accounts()?; @@ -595,20 +1171,40 @@ fn main() -> Result<(), Box> { }) .collect::>(); + let infrastructure_concentration = get_data_center_info() + .map_err(|e| { + warn!("infrastructure concentration skipped: {}", e); + e + }) + .unwrap_or_default() + .drain(..) + .filter_map(|dci| { + if dci.stake_percent > config.max_infrastructure_concentration { + Some((dci.validators, dci.stake_percent)) + } else { + None + } + }) + .flat_map(|(v, sp)| v.into_iter().map(move |v| (v, sp))) + .collect::>(); + let mut source_stake_lamports_required = 0; let mut create_stake_transactions = vec![]; let mut delegate_stake_transactions = vec![]; let mut stake_activated_in_current_epoch: HashSet = HashSet::new(); + let mut infrastructure_concentration_warnings = vec![]; for RpcVoteAccountInfo { - vote_pubkey, - node_pubkey, + commission, + node_pubkey: node_pubkey_str, root_slot, + vote_pubkey, .. 
} in &vote_account_info { - let formatted_node_pubkey = format_labeled_address(&node_pubkey, &config.address_labels); - let node_pubkey = Pubkey::from_str(&node_pubkey).unwrap(); + let formatted_node_pubkey = + format_labeled_address(&node_pubkey_str, &config.address_labels); + let node_pubkey = Pubkey::from_str(&node_pubkey_str).unwrap(); let baseline_seed = &vote_pubkey.to_string()[..32]; let bonus_seed = &format!("A{{{}", vote_pubkey)[..32]; let vote_pubkey = Pubkey::from_str(&vote_pubkey).unwrap(); @@ -626,10 +1222,15 @@ fn main() -> Result<(), Box> { ) .unwrap(); + debug!( + "\nidentity: {}\n - vote address: {}\n - root slot: {}\n - baseline stake: {}\n - bonus stake: {}", + node_pubkey, vote_pubkey, root_slot, baseline_stake_address, bonus_stake_address + ); + // Transactions to create the baseline and bonus stake accounts if let Ok((balance, stake_state)) = get_stake_account(&rpc_client, &baseline_stake_address) { - if balance != config.baseline_stake_amount { + if balance <= config.baseline_stake_amount { info!( "Unexpected balance in stake account {}: {}, expected {}", baseline_stake_address, balance, config.baseline_stake_amount @@ -666,7 +1267,7 @@ fn main() -> Result<(), Box> { } if let Ok((balance, stake_state)) = get_stake_account(&rpc_client, &bonus_stake_address) { - if balance != config.bonus_stake_amount { + if balance <= config.bonus_stake_amount { info!( "Unexpected balance in stake account {}: {}, expected {}", bonus_stake_address, balance, config.bonus_stake_amount @@ -702,14 +1303,127 @@ fn main() -> Result<(), Box> { )); } + let infrastructure_concentration_destake_memo = infrastructure_concentration + .get(&node_pubkey) + .map(|concentration| { + config.infrastructure_concentration_affects.memo( + &node_pubkey, + *concentration, + &config, + ) + }) + .and_then(|affect| match affect { + InfrastructureConcentrationAffectKind::Destake(memo) => Some(memo), + InfrastructureConcentrationAffectKind::Warn(memo) => { + 
infrastructure_concentration_warnings.push(memo); + None + } + }); + + if let Some(memo_base) = infrastructure_concentration_destake_memo { + // Deactivate baseline stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &baseline_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!("{} {}", memo_base, "base stake"), + )); + + // Deactivate bonus stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &bonus_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!("{} {}", memo_base, "bonus stake"), + )); + } else if *commission > config.max_commission { + // Deactivate baseline stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &baseline_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!( + "⛔ `{}` commission of {}% is too high. Max commission is {}%. Removed ◎{} baseline stake", + formatted_node_pubkey, + commission, + config.max_commission, + lamports_to_sol(config.baseline_stake_amount), + ), + )); + + // Deactivate bonus stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &bonus_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!( + "⛔ `{}` commission of {}% is too high. Max commission is {}%. 
Removed ◎{} bonus stake", + formatted_node_pubkey, + commission, + config.max_commission, + lamports_to_sol(config.bonus_stake_amount), + ), + )); + } else if !too_many_old_validators + && cluster_nodes_with_old_version.contains(node_pubkey_str) + { + // Deactivate baseline stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &baseline_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!( + "🧮 `{}` is running an old software release. Removed ◎{} baseline stake", + formatted_node_pubkey, + lamports_to_sol(config.baseline_stake_amount), + ), + )); + + // Deactivate bonus stake + delegate_stake_transactions.push(( + Transaction::new_unsigned(Message::new( + &[stake_instruction::deactivate_stake( + &bonus_stake_address, + &config.authorized_staker.pubkey(), + )], + Some(&config.authorized_staker.pubkey()), + )), + format!( + "🧮 `{}` is running an old software release. Removed ◎{} bonus stake", + formatted_node_pubkey, + lamports_to_sol(config.bonus_stake_amount), + ), + )); + // Validator is not considered delinquent if its root slot is less than 256 slots behind the current // slot. This is very generous. 
- if *root_slot > epoch_info.absolute_slot - 256 { + } else if *root_slot > epoch_info.absolute_slot - 256 { datapoint_info!( "validator-status", ("cluster", config.cluster, String), ("id", node_pubkey.to_string(), String), ("slot", epoch_info.absolute_slot, i64), + ("root-slot", *root_slot, i64), ("ok", true, bool) ); @@ -754,7 +1468,7 @@ fn main() -> Result<(), Box> { ), )); } - } else { + } else if poor_block_producers.contains(&node_pubkey) { // Deactivate bonus stake delegate_stake_transactions.push(( Transaction::new_unsigned( @@ -818,6 +1532,7 @@ fn main() -> Result<(), Box> { ("cluster", config.cluster, String), ("id", node_pubkey.to_string(), String), ("slot", epoch_info.absolute_slot, i64), + ("root-slot", *root_slot, i64), ("ok", false, bool) ); } else { @@ -827,6 +1542,7 @@ fn main() -> Result<(), Box> { ("cluster", config.cluster, String), ("id", node_pubkey.to_string(), String), ("slot", epoch_info.absolute_slot, i64), + ("root-slot", *root_slot, i64), ("ok", true, bool) ); } @@ -837,13 +1553,13 @@ fn main() -> Result<(), Box> { info!("All stake accounts exist"); } else { info!( - "{} VLX is required to create {} stake accounts", + "{} SOL is required to create {} stake accounts", lamports_to_sol(source_stake_lamports_required), create_stake_transactions.len() ); if source_stake_balance < source_stake_lamports_required { error!( - "Source stake account has insufficient balance: {} VLX, but {} VLX is required", + "Source stake account has insufficient balance: {} SOL, but {} SOL is required", lamports_to_sol(source_stake_balance), lamports_to_sol(source_stake_lamports_required) ); @@ -874,11 +1590,22 @@ fn main() -> Result<(), Box> { &config.authorized_staker, )?; + if cluster_average_skip_rate > config.bad_cluster_average_skip_rate { + let message = format!( + "Cluster average skip rate: {} is above threshold: {}", + cluster_average_skip_rate, config.bad_cluster_average_skip_rate + ); + warn!("{}", message); + if !config.dry_run { + 
notifier.send(&message); + } + } + if too_many_poor_block_producers { let message = format!( "Note: Something is wrong, more than {}% of validators classified \ as poor block producers in epoch {}. Bonus stake frozen", - config.max_poor_block_productor_percentage, last_epoch, + config.max_poor_block_producer_percentage, last_epoch, ); warn!("{}", message); if !config.dry_run { @@ -886,16 +1613,106 @@ fn main() -> Result<(), Box> { } } - if !process_confirmations( + if too_many_old_validators { + let message = format!( + "Note: Something is wrong, more than {}% of validators classified \ + as running an older release", + config.max_old_release_version_percentage + ); + warn!("{}", message); + if !config.dry_run { + notifier.send(&message); + } + } + + let confirmations_succeeded = process_confirmations( confirmations, if config.dry_run { None } else { Some(¬ifier) }, - ) { + ); + + for memo in &infrastructure_concentration_warnings { + if config.dry_run && !notifier.is_empty() { + notifier.send(memo) + } + } + + if !confirmations_succeeded { process::exit(1); } Ok(()) } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_quality_producer_with_average_skip_rate() { + solana_logger::setup(); + let config = Config { + quality_block_producer_percentage: 10, + max_poor_block_producer_percentage: 40, + use_cluster_average_skip_rate: true, + ..Config::default_for_test() + }; + + let confirmed_blocks: HashSet = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 14, 21, 22, 43, 44, 45, 46, 47, 48, + ] + .iter() + .cloned() + .collect(); + let mut leader_schedule = HashMap::new(); + let l1 = Pubkey::new_unique(); + let l2 = Pubkey::new_unique(); + let l3 = Pubkey::new_unique(); + let l4 = Pubkey::new_unique(); + let l5 = Pubkey::new_unique(); + leader_schedule.insert(l1.to_string(), (0..10).collect()); + leader_schedule.insert(l2.to_string(), (10..20).collect()); + leader_schedule.insert(l3.to_string(), (20..30).collect()); + leader_schedule.insert(l4.to_string(), 
(30..40).collect()); + leader_schedule.insert(l5.to_string(), (40..50).collect()); + let (quality, poor, _cluster_average, too_many_poor_block_producers) = + classify_producers(0, 0, confirmed_blocks, leader_schedule, &config).unwrap(); + assert!(quality.contains(&l1)); + assert!(quality.contains(&l5)); + assert!(quality.contains(&l2)); + assert!(poor.contains(&l3)); + assert!(poor.contains(&l4)); + assert!(!too_many_poor_block_producers); + } + + #[test] + fn test_quality_producer_when_all_poor() { + solana_logger::setup(); + let config = Config { + quality_block_producer_percentage: 10, + use_cluster_average_skip_rate: false, + ..Config::default_for_test() + }; + + let confirmed_blocks = HashSet::::new(); + let mut leader_schedule = HashMap::new(); + let l1 = Pubkey::new_unique(); + let l2 = Pubkey::new_unique(); + let l3 = Pubkey::new_unique(); + let l4 = Pubkey::new_unique(); + let l5 = Pubkey::new_unique(); + leader_schedule.insert(l1.to_string(), (0..10).collect()); + leader_schedule.insert(l2.to_string(), (10..20).collect()); + leader_schedule.insert(l3.to_string(), (20..30).collect()); + leader_schedule.insert(l4.to_string(), (30..40).collect()); + leader_schedule.insert(l5.to_string(), (40..50).collect()); + let (quality, poor, _cluster_average, too_many_poor_block_producers) = + classify_producers(0, 0, confirmed_blocks, leader_schedule, &config).unwrap(); + assert!(quality.is_empty()); + assert_eq!(poor.len(), 5); + assert!(too_many_poor_block_producers); + } +} diff --git a/stake-o-matic/src/validator_list.rs b/stake-o-matic/src/validator_list.rs index 74f739b093..18edcbd5bb 100644 --- a/stake-o-matic/src/validator_list.rs +++ b/stake-o-matic/src/validator_list.rs @@ -4,26 +4,40 @@ solana_sdk::pubkeys!( "123vij84ecQEKUvQ7gYMKxKwKF6PbYSzCzzURYA4xULY", "12NG4Y7LGq8VQgtDZxn82ok4eGUEqmhzKh3wZtsakjLN", "12Y25eHzGPaK5R5DjQ1kgJWuVd7zrtQ7cmaMPfmacsJV", + "13nbrL1VjkfTZuaz7rNriYw6fWDFggqEf2g1C4mPETkr", "234u57PuEif5LkTBwS7rHzu1XF5VWg79ddLLDkYBh44Q", + 
"238Fmy2TwU26Fo8XFRu2PzDWNbcn3bitywEPYG6tpztu", "23SUe5fzmLws1M58AnGnvnUBRUKJmzCpnFQwv4M4b9Er", + "23vHFSzVQAqbUQkTMaogVbPuyWM3RbKowC9jnEXQVFm6", "25UM59KCvciYwhjCq7t1rC8ZuvsxQBC2QRcaRNfq7xML", "27SB7d27xvtBJjgsAV8JBDjQroySmZepiNSepeRbRhe9", "28LgQ7MeEZVgNJfYRc6UnoAz2SnSjKbyCKM6sntCRotb", + "295DP6WSsiJ3oLNPhZ3oSZUA6gwbGe7KDoHWRnRBZAHu", + "29tXWWzvGNvE5j8i6FLfHpmanPC9treZsCo1uA4ik1kL", "29Xwdi4HpBr1u9EAqDz3tBbMuwqBuczLPuVe2gGkg7ZF", + "2bQyrSEPaQ9BMbu7Ftv7ye1fxtSLW3oZRj4d2U64AJmc", "2BT25HZHpyzYmTbqyqzxBK7YjjH4a6aZ783TEgTTGYo5", "2bvUqyatpaDLn9ch9BxNYRwg5SFpq3rDfWJKfohC9iD4", "2C26iHJcU5dqJJQ6NME3Lq583RT1Js9QDtgfmzknRajc", + "2C9pDcbRQJxbUHivgDdg4LGuMwm5oeVCnHS9w5JktNTo", + "2Ceu5z672tACCDB6cmB3n4tih7B8dikv6k6kfYr3Li1e", "2CGskjnksG9YwAFMJkPDwsKx1iRAXJSzfpAxyoWzGj6M", + "2CJnVLz7tMWQDHHRkyXQpnaFF95i5FZsGU9NZcUdWJ2h", + "2cxgydEWqiVTookrgPWucNuQqmwThyoNAYPX3GqeDdvh", "2D1oCLRK6geGhV5RyZ52JD9Qzqt311AEH1XrTjZdzbRh", + "2dg2NiZC1LyC5LbUsv3sJYU1LhAqkJrK23J6hssoLJAA", "2dm9YbgXtR5yimmgsLkfaMLcNZxhjywW4bLnvChms3tb", "2DvsPbbKrBaJm7SbdVvRjZL1NGCU3MwciGCoCw42fTMu", "2dYoJ9T45W6N8bCaccesfJ3i2diazR7YkyeLMX4okKth", + "2eDDjJSKdxf8qwojH1E2SoZFHqst56GXxtmAnoZtGdtu", "2eoKP1tzZkkXWexUY7XHLSNbo9DbuFGBssfhp8zCcdwH", "2EQekczAd7QmnWhmPpbhCCsWeFooqZmQqHNz8qo8S8RA", "2FCxeG7mBYy2kpYgvgTZLCYXnaqDc4hGwYunhzHmEUD1", "2GAdxV8QafdRnkTwy9AuX8HvVcNME6JqK2yANaDunhXp", "2GAtWPFNhEfj6nmXJp2YqPhSY55TvLeZpLRq9t3hqi1F", "2gV5onEfn8KmtZ3Lck39GrNEZyTxJ1RiNV5s7fRdC3gc", + "2gx4am78NPRPxEW1CKg6yZ3viFob4bHnz6AUDSCMnmp4", + "2iAP1WMsKJVje22cgPJNGC7Jgv5DQj37QZwcDNUDd9F3", "2ibbdJtxwzzzhK3zc7FR3cfea2ATHwCJ8ybcG7WzKtBd", "2iczkZceGZQqimksY8uk6NLrQXoMFZGK1mTWos4QnZ3a", "2jrM8c8ZhpX9CovseJ2sRsrU9yzsFrP7d3gCi5ESU5Rs", @@ -31,40 +45,66 @@ solana_sdk::pubkeys!( "2jypS1SoX6MLEfuNvUH23K7UU3BsRu3vBphcd7BVkEpj", "2jYzt9Ly7dNzNpMV9sbHBNuwMEvVdSi9L8yJCTJT21ki", "2K2UhSoUNNi7Q78F5QCpcutRaifTjckGYKLpsw7i9noa", + "2KCo4n2nFpAAVec2NS46XHWpWRKcDLu6sR9fPSbCb3nS", "2khFqurxeMKKfhFJ9dfas1L9LsHwt2qHGW8Ztinzoeob", 
"2LsRRgttA1PKXXeKTZP2QhetgM94Dj5uecmTzyQkTvXK", "2mEvgikTj9SRL59MXogmYfyAR9ze51HV5dCgPUEj6V5t", + "2ofEZBxkiZoBpxXcXT68RTHfuQQFChSYVXVPGbFfvMTP", + "2p64GWwGEWtHdwjdeXCMHU5LBstm5BenLdGsJZzDrKHH", "2Pik6jn6yLQVi8jmwvZCibTygPWvhh3pXoGJrGT3eVGf", "2PSBMBFVykqSc4MFQ3krJVRosjmixSbNeNJfGMmL1DFp", + "2PvsR9DM2GZavFQGDsdJwXJvPWsyneyT9Gpu7wXGDkSr", + "2qRkFXBL3Ej7vzPCTnygaKVA8mNMPe66w762oHLYWrDH", "2qYB7wEBJ1UstVNZLbWyvTXNfocSsWEh7cFaTLkqsvfR", "2RLf3RSy1ScBFL5UzVDw3jYKCAuGA9vHpr9dnbQzJt8V", "2ryEov5c84JLWwt5Ub1xGYWux1LF63j7kaNRc6jiv4As", "2RYnM1C5XuzWzZu4sD7TyJTgxQTKzFVHG6jNtbK65q2y", + "2sBsdFT58SPfd5LQyE8MhEgJWpaoHUCoN4QFCVqNZpnj", + "2TcLzmdcpQqbNEBjU2Hxp1MiETSww1HJeWW84F61Z13k", "2tvTYUajoBgeAd66Zhd5Nc2BgKGCgdmasR94fpBokzop", "2tZoLFgcbeW8Howq8QMRnExvuwHFUeEnx9ZhHq2qX77E", "2URaCX9G2dKKrtwygpZemfTpDivpSNT31T25vX4YGR4R", + "2uzuT8dgVVLLgG57n5W9vxMTaNAaLavnt8gtiF7V1FVV", + "2vobGAfskQtdSCFHhrmMCK9PFojyGPhSxYtjDP8irkqE", "2VzCLy98rzmvKGo23e1eM4LANCt9JFrtVTBBMZzGT4FW", "2X5JSTLN9m2wm3ejCxfWRNMieuC2VMtaMWSoqLPbC4Pq", "2XAHomUvH3LFjYSxzSfcbwS73JgynpQHfapMNMJ8isL9", "2xFjhfxTKGVvGDXLwroqGiKNEF3KCSFaCRVLHfpsiPgd", + "2xoWe7LGX8Kmnwoc27VF2iYkxfKESjb3b9rU1iM9wHJT", "2XP9MfzWQnX3DiAJQFSKXKyqBr4M7GKhHnKc9P7z519H", + "2y857Ss2GgyL9WooNqt6sAgxDVwr9pE6i4BiJ2wrC4g3", "2yDwZer11v2TTj86WeHzRDpE4HJVbyJ3fJ8H4AkUtWTc", + "2YgttBBx9Ax6WhF9xu8CSWw5usKFWxaxzJtg8zPajadx", "2YhQPcbz1NYuu1B7Kpxi2NGrpZ8Jdv9azw8Jf6E9FC71", "2YLPihCDxqztR5be69jhoNDPMxV6KeTJ2X2LtVBXDgp4", + "2YtaVYd8fZHXpPezG5gRp9nudXGBEPcqFtJe8ikN1DZ7", "2ZeChc7Res7fUVdcGCDTJfRd9N8R21hiBPLAuJsqHHwh", + "2ZETk6Sy3wdrbTRRCFa6u1gzNjg59B5yFJwjiACC6Evc", + "2zF9q2xyKpsu9CSGJDj81j3N9ryvrhPRcjCkzafCezsg", "2zHkPFBSxWF4Bc6P7XHaZMJLfBqtSgfDCBqTZ7STXE1a", + "2zQSe8o4xws8q3tMFU5AoPxMzu1JZoufR3tsZ1zknHiK", "2ZuLSKq6t5nqRLp3GqtRSttu7FE9if2nrMegHLnp5skK", "2ZZkgKcBfp4tW8qCLj2yjxRYh9CuvEVJWb6e2KKS91Mj", "31cud34DHkL5zM4ZiHXgsrkAu13Jeck7ahvkPU9i4Jze", + "33FtaV5DrLUPYYQK7QAiD3LBDXD2VoCv6BuCQVoRdq57", "33kmrfcRXVtWZxmVJ1GTsPNKpBXWTj1tv65wTVTiifyw", 
"33LfdA2yKS6m7E8pSanrKTKYMhpYHEGaSWtNNB5s7xnm", + "34D4nS1eywoA1wiwcgrBP8Ewj9NXyaZ3dP9DJKfkvpGn", "368KipD4nBzVs4AizHj1iU4TErSSqmZaNGVyzHx8TVXM", + "36gARMU4V3D6hu5EJi7wYFW6cC1tNym9DkjZfAGFQTbk", + "36GQk6L2UmRhy7T3Vjb75HySi7226SK4eHn9WpycmZst", "37Gr1zVPr79E3AdPFj8EMyKZYt7Bnz3VWKjdFctQC8fL", "383CX582368Zao48isK6qp33xBphCvhfgdEowfiXeG1k", + "38hgERMK335yrDsyPkc4wbW2FUiXgmuWRght9n7RVAtz", "38zXVD94Hp71ftxT5JqeTYzTGECk1BmiSJVoXzbS7jsR", "39FH4cnkSawRtr9N2VbUVST4o6ZiixW2K4QCzLqW8tMg", "39moskfERPLyaspZAZNsbrXgiUxWGGgyjPWu1ZaN2dJv", + "3adqz1JN9sbsjHGxQizz2ibJmyCHtUpP9aPnZYxixB4c", "3ANJb42D3pkVtntgT6VtW2cD3icGVyoHi2NGwtXYHQAs", + "3bcAoEkLDKNuFg77wzCx2dbPNSgeZVF5AYHDet1rPyd8", + "3BCokPfahX9rLYMh6E6uYTEFuchiKd9wZcXUvwDHFYiH", + "3bQ4s7ynWKjEPrkTfDx1aT2sXejXXYjbfYumBHc5LA83", + "3cJeH1TCZcNf5gCZnSbfZne9DQCiexkzuH6gwQEeBjqA", "3ckQZncmgmS1aZCC7Eot659hoBQ8k85ExddD7fu9qJ2c", "3Df9iVRoSkX3YZ6GmMexeLSX8vyt8BADpHn571KfQSWa", "3dVEmuYaJxLdLNGnNxCuLtELp38VKMxJPY6gUJScu6Jc", @@ -72,71 +112,128 @@ solana_sdk::pubkeys!( "3FhfNWGiqDsy4xAXiS74WUb5GLfK7FVnn6kxt3CYLgvr", "3Fiu2KFBf3BoT9REvsFbpb7L1vTSs7jnmuDrk4vZ9DNE", "3g7c1Mufk7Bi8Kk4wKTGQ3eLwfHYqc7ySpP46fqEMsEC", + "3gEaREHMBHTafrMHrFgibZeDq73GUeiiZt8U1KGwDVGE", "3HitRjngqhAgVuNdFwtR1Lp5tQavbJri8MvKUq5Jpw1N", + "3ht1z7tMieDiLkukray7AauF214xtsWFFG1E4A1oeAXU", "3i7sS5McrJ7EzU8nbdA5rcXT9kNiSxLxhwyfuxbsDvBj", "3iPu9xQ3mCFmqME9ZajuZbFHjwagAxhgfTxnc4pWbEBC", "3jddze9ZFYxTfVkV7xr78TSSkLahj6Zf1G3TUrhnWQuw", "3JeLfM9V7CkK1D8W8LFirU65AZsWRo127PumwNc3dFQi", + "3JfoYf6wmQxhpry1L61dnDWYJbL7GYi4yt7mybehuhne", + "3js25yzgUwUkk4b61NYSuw4SfMbmStoPXMy8ZUp3hkf7", + "3kiAniQf6y9ZT3SdE8X7Rq5jM3MX6BUZy5KDT3wt6zAk", "3kWT2K2HfxrspLFoJhKUAio3QF85EuTemJKTUcPEjm7m", "3LtAt3iqmeTgJ3GD8DtCcjkRkJdDKAF42nJytn28syeP", "3LWv8RrdEyMtePAMCmohBzWAz7fmN7Cf2ctSUxJKEQnS", + "3MdUXXiLWeXQauVSiuGwPjakCv8J5CX5v1fu8eutJ7v1", "3mx22d1aJLazEutJyHVszdwyLJcrRo26EKB4AWDbRxRc", "3NchsxHzVUAv6MTGEuAVt8QRdi93uHGNRmS9AEiZkMVh", "3NdgfZXaj83dKoskxY2LPyQmrjtF3ikCmfCJeKTdFqaB", + 
"3ndqwmmqTEFaydt6bgTDohL35WJCjv2cezUcYezcHHcJ", "3nL1oAkcW4M88VG4D78dNxHrqaNdKyJqKW3wbhhBjhig", + "3NtGCPqA5dTucxitLz5KTxERZ7XdVSZ8c2m97TGupV3S", "3nvAV4PVG2w1F9GDh3YMnhYNvEEzV3LRMJ5e6bMYcULk", "3NyhrTWkxdLmi1nuW7Xo7tfqSKrsM6dxQkZzhs7Z1JWx", "3oaf5Y78LHEt38Bcb8bBtabCBySWyyEor7LN8hGwHLLU", "3oSpYov4ngdWKcZBnxxSPTe2KmeC5s3iKTi518KP9exz", + "3pqniPoVa85STVdvSDpKiaAqvWrMWLgsbJaNos5mvnYk", "3PVz8crz85wgqgudf6mxws2psgKc4kr51MhfmU6VekEG", "3pzTi41c6NAbZyTiyPEAQtmi2K5MyWZJMxx6nDvWPgnQ", "3qaaXFYh389e1Ncboc7qbCWxSQdbaiYuTFrJVYuh7jo2", + "3Qj4rFsMRMsXnYescUVi53kDY4KjNnNy2QE4tc4WpQET", "3QK8tbsVSwU6xRzLWhVFJCcnqm9WPxSUdaa7cXzBQZZh", "3QuBhrNbo47ywuK2TmAbvKp331kNDr1up5HM4J6KKYwC", + "3R82jDjQsrzZgQKiEJbKfdCA9ngYQrjZehYuEFmhhfCP", "3rFxX6D68YhDpF7c6vDt2yhfp8CXXcjNNga43cCJ8Ww9", + "3sxNcHfzE1ygfrYB9xpjyuQ5nBnc1pwJdVCDhZ8eK7LW", "3SYNAWbZuuWMYKHwL73Gpeo1ySqRSshf5WDoW9vYVz9V", + "3tEqZrbb7xwaRwri19Z5TAznrewnM2m2SCkvSmLztWcE", "3Teu85ACyEKeqaoFt7ZTfGw256kdYGCcJXkMA5AbMfp4", + "3TQbBx85TDi5uQFLDzjZmecs1esSen2Lh1ybHvdUPkEu", + "3uF82ATbSzKFpAZKQ9LV4BziPsKycMeUZCzaWfL6rSbU", "3viEMMqkPRBiAKXB3Y7yH5GbzqtRn3NmnLPi8JsZmLQw", + "3viEMMqkPRBiAKXB3Y7yH5GbzqtRn3NmnLPi8JsZmLQw", + "3vkog7Kaki74rn7JFWxKyrWfTEUnp4cLpJyvgs233MyM", "3W4fe5WTAS4iPzBhjGP8a1LHBTx8vbscqThXT1THqEGC", "3w6hQh7Ndx93eqbaEMLyR3BwqtRxT2XVumavvU93mcRk", + "3wwYJDVkY1rK5emynSYgbwUy9X3eFcNQiyYxc4Jsd9iL", "3Wyqj2cgKYK2sSSb3wVv3wJ5yD3yigV8iLLttkZfKn8d", "3wz211BhQAE2n5fjDQSStM2iSizhNRyJDNRkDEc1YwMF", "3wZB4hqoLXqBuzFun2sBstu72RgYnR37RWk8YnqzEBLa", "3X5fSxjnJ3Y626cGNosRokUW3nYdTpvZeheGpSjD5uEb", + "3X6FsQ8awkcU4iXTF82T4RtnTJx9LTY5D3dHK6zDE1Tp", + "3XeDMpzQc5SmrSE6w9nYBXkdk8AzX7q5XohzxmwMmExG", + "3xgtKbSXjtZe7hqxHbK2WLYJGPJw1hfvZKzHrTkygiZX", "3xKsqGgLMNVazzNBsKa9TPG2Vo5fGLr1xkKrTMVXVVkT", "3xUTkgPKNJZ3dkpDMV8zWV34BkmvKanguKipv6M9x2Mt", + "3Ye1g9E65wj9wtbTLetQbjsQ6SFj4s7RdTJaxjq6duDq", + "3zAa3TC1Z8KZUmi5TDcQLbXDCETNEdDkcBfKEBchTuCm", + "3zQeAZpvgzhffeSyoZLJUrbkAk6ZjV9H1SBEtCmm8HyL", 
"41NQTqFPCwdiiGMibSExCNJ95ALFBkcxJtvQw2S41oLc", "42oRgGrFtPHPdw28dw9HiJEKTc7WVJh1ND3dDc2m2UWm", + "43h2uYRTSVhMNXKuxY4Kn6T558u436qy59cV6Sz6rdRi", + "43ZCLRdQgcajUq4WTxtTqkqGtpNnJTmLUs4ef4qGKtAc", + "44J72PpPim1PJHge3TwJWAMnuPhwE7DMLaZmCerYEC61", "45aGtJWVx9xbhp11diPithdQS1E9Hzjm5b5HEpAM68Ax", "45tyReiehTPZ7rYq35khyF1bnanHPHavNrRj5QH8HnAc", + "462x4mp5aZ29SetJR3oka3d2ARXVKUcs9f9hZsapf7ML", + "46GijDorcsduUvWFNWKAV1yB6XwPG699wS2gR4no4zGU", + "46WCeEExQaEJfatG53qgxMzgPqubbrAvVBeYSyUQt317", "473ToSs8wTyGd2DTmwb1zNkr7TweNC1Wfui2FzKNB1JE", + "47qmq7tBedjcH2vt8TkANNcPwARqHr5Xh2fJxCkTSV9y", + "486kJEz1XJ95nULg2Ccj9Av9yi1inexzHRVW9UjfR2B6", + "49gM7gXEJEokKHEoUCNve3uCRMAoRwKUpEiqK2nku6C2", + "49Q14TEnx7XTHsFtRs9xhQ12wXRHwaWJ5YSpGhVNhSgy", + "4A7XYUpU2Cvj84fBhkcUQPQMJsZywqgjvD65zSRZmquP", "4ajWybNN1XqaapKEEiz4MPMyCP7Ppuw7FMQwQ57o7gFZ", + "4AYWAYndF6EsfgwVTrsHLMviNsvuqh9dAMcJynpJk6YB", + "4AZNBZYtjpbJAtFi8cYqy79EcP2qHp4fkLBagfdpvMrY", + "4baXhu594FEQtZsAmHNjNM8K3NxmPNsYCxyPUZnhwHLm", "4bLyjRauEjdJGb86g9V9p2ysveMFZTJiDZZmg8Bj29ss", "4BWRooHWbpeBRmjb8y3HgtkMcK5MWVAKce2dSXnTP5Tr", "4Bx5bzjmPrU1g74AHfYpTMXvspBt8GnvZVQW3ba9z4Af", "4Bzp9fzcdjctbdo23SCwCEkPeQzCeyTb3WtwiK3KNVRc", + "4cLRyEVzhvt1MKqEeVeVfsxfJzZyUwpJGQADBW9qgwks", "4cxKnptRvBHYMqUX5hsbEBcj2GmoAxmJGYoqN5YgfUQp", + "4cDG4caeUNBz1Kx754vNvZKa54NwDv5ph19xuNk1zsvB", "4dT3ySaMTMyG7wssPNYUixRBxfkbWTbnsoaWLzfwUinU", "4dWYFeMhh2Q6bqXdV7CCd4mJC81im2k6CXCBKVPShXjT", "4eyn57baA11sgvkQafTcrwJ9qVs6QptXBahf43Li1jKc", "4fBQr617DmhjekLFckh2JkGWNboKQbpRchNrXwDQdjSv", "4FZSiJpGgprsVxkzc2F8v3bgnRpk8Ez1Dq7ohXwY1q9V", "4g5gX1mmFGGragqYQ1AsRpB8ZJvwCoUKVT5LtKTDrNSp", + "4GBSypESidsbB6ACFRUTkwDwcv1G5anashx6UvSypqCF", "4gEKnFpiJ8XC6DdFw4D65uYQeMF8x7KDqMrBPrDVjMPb", + "4GFicguUX8WDqFL84XWpvKyqm7NXNUg1T3GTiKUu7MkA", "4GhLBaxr1oEHWpoGnWh3mcRXUkBU5EEQZv3L27c7ohoq", "4gMboaRFTTxQ6iPoH3NmxLw6Ux3SEAGkQjfrBT1suDZd", + "4GsnFvSfNWzTbeXAPMCfbUPjYmHU73a8wyiX3zrds44W", "4hDEtsHXAf6TMBNJHogmN5noitFzxGxKAs5YwsKZzrDd", + 
"4HjA5dBRcMajmaYfwYxqdJBzYbuFxPqjoVjnsTk6Xjqv", + "4imbBQgQvg447bVmyodJYjyM5y6NH865h97bswNM3cXk", "4Jb1YfUUN1xxdYb28wPLT6A52j459uLNBJaetpk3vAKE", + "4JPnDAjvQPYB8bJVw2E2Wvi1sZZd5VHAMeBncqqt8U8J", "4jZMrzWGfMHDRkEBqwnx1cPR6uP3i8v2EaKALzi7bYbc", "4JZsGW4WUSjAjH4joCaAAVnNi5ERfHr93YUDxmHZpDM7", "4keeN1kQVHQFKBJvZNKjmLHpLvZMEM3rtrVhwM23Sbgr", "4LyxkXdt9cwze6MkBY71pKdV2S3SxzNBYoUvNkDH9hBR", + "4mCp1G9zmqRH53wX7j17wmZimHbn6ep1NvLmsMUwHjDj", + "4mdxZgQQdkVJvPK8Z8T55sbUXU25ZzjTNs1ydvrzVnYs", + "4MNtUgysSfjwfpgYBJFJQA2Kn5LXPQzgRLnJoCAseKrx", "4N8tZu9Yn9AkkpJCYVSH2o7jCHMThkeG8SRNc3ThnrFL", "4Nh8T1d4YBZHEuQNRmFbLXPT5HbWicqPxGeKZ5SdAr4i", + "4NKRcNv3BGZAqHnehxFFPpWdKLtmJ4WWidoPk4hK3UXY", "4nu5rdaXjhXHniTtVG5ZEZbU3NBZsnbTL6Ug1zcTAfop", + "4o8VRbGZcmiWm4Zc79LsBgDcqXmmVte3kvCroq2zwLG9", + "4oNUWNoSNnwghHBCGsuAaQEuaB6oZEXE2w4VNhRxoaQc", + "4oRsVpbjGU17ndfQVQSFNYRzuj3uETiBT3e1Hz3GHLLi", + "4P5UxtvVBXBmYRGMV1GM6xVf5soMsYaPo5JDMwcWhLVU", "4pZjWxF6277CRncZjggHdiDN96juPucZHg537d2km4f9", + "4q1KX2Epud4kS7tYuyndLaon1FskmDqcwh5ubxHiSzdP", "4QNekaDqrLmUENqkVhGCJrgHziPxkX9kridbKwunx9su", + "4QY21MyFAtXbagGymZuBLu3a6wUkFg5qaUDRwYj4Pnuy", "4rGW4pdnjvMi22PnCdDBGxX5ChTrMcyFSF3T2xHUEQvC", + "4SqdkosjugZVRdX2kRptUng487Uece5toWHZXVh6cpQV", "4sRKUyYwqmc38TpPGmkbLfjKkyNBGEBaiYJaMCYfkUBh", "4STBf6muaBxEx8e3wcUT1DwVq5UuXxXYjWPcNXecpdE5", "4u2qTnf4QVC8PcgNFPBwY2PwdkiMa4jb3KnNZo4zZbtV", @@ -144,25 +241,40 @@ solana_sdk::pubkeys!( "4veSBAABaESW2WpnJzcdNcduopX7X1f63KziC24FhQee", "4vgoKb76Z2vj9V9z7hoQpZkkwJrkL1z35LWNd9EXSi2o", "4vXPjSaZfydRqhnM85uFqDWqYcFyA744R2tjZQN8Nff4", + "4WkMVnmyoWuAGifnmqdWNtD3nudHp4hPPqvnyUHLkGWC", "4WufhXsUhPc7cdHXYxxDrYZVVLKa9jCDGC4ccfmuBvu2", "4X1qvzrv95EfE6x3i13iokffWBLQGT3t2ZkGaKmERX7r", + "4x7HEA12XAiqjsM5FbWkyNnwKfqzSDHWA1XA79uFpzGJ", + "4Xqmh7JpjaFj5wJ6tNGbEY8eoY8U3fPMUKzfQXcGWiDR", "4XWxphAh1Ji9p3dYMNRNtW3sbmr5Z1cvsGyJXJx5Jvfy", "4YGgmwyqztpJeAi3pzHQ4Gf9cWrMHCjZaWeWoCK6zz6X", + "4z755TDizaUVyRRKw7y8DnTnnon8ksQYsZyU3feF6yFc", "4ZtE2XX6oQThPpdjwKXVMphTTZctbWwYxmcCV6xR11RT", + 
"4Zto93KdBuynSnyyQct6ecMVxGNrjvVHe4CbWJTtvLSq", "512wm7UysDB8PNwWpjMBmRgYHdQAoj7o6EDJ9CUyK2kb", "518q2YT5TjpwZM3sLSTk58VVmdYkF86abh7GGyoUaHZ", + "52GEvaeCcEyAUKrfoPcey6vdyw6th588nYPuCkn3Kxes", "52MERCkzgb4icyneihfLaeaqhWZYPxYH8fyJLEddnYXY", + "52rpdXBbJG4ChidZc1BiMU5JucsJQQa98zZUEUaP8Rwy", "55nmQ8gdWpNW5tLPoBPsqDkLm1W24cmY5DbMMXZKSP8U", "55ofKaF1xdfgC9mB4zUhrffdx7CVoxTbNo7GeQLyj3YL", + "55tZynRDphTaxtH17x87FjcyJjCHCch3SrVxuanUJZmd", "57DPUrAncC4BUY7KBqRMCQUt4eQeMaJWpmLQwsL35ojZ", + "57Nqrmi7wnUsvBdrkSpyfJHWic9dJqw1KpfgYtmx7XzR", "58J9ucd9Qc6gMD8QHh2sHTyJyD8kdjHRQZkEAyAZ72YA", "58ktQdWmK3D3VxX1smCXhHGKdNNEJ93PGpXtX8RLdLHL", "58M2W8tybgWy6pJVqk7tT7YF7C3rmUxVM4MWN7LG6m7D", + "59TSbYfnbb4zx4xf54ApjE8fJRhwzTiSjh9vdHfgyg1U", "59WHuha1QunWmupWhFA4vr3WMUe8BLN7dc8HUsJ4YC86", "5aGEHgWCyHNxCcNMHP5TDddUkT5uXGpuwBfonE13jnMB", + "5AGFPAidurZA45DFXc6cERHsvkdMKohXdug7bBFoCsd8", + "5B5zutCiy12JLoi6urb7uX9rZzwUhYH4VmiDobMB42dU", + "5B8dRstrVg4NXw39yswMdr6ETHCsbKaSbWCAxCH6gofs", "5BFP2FY7kdV3ogDrKf9UtKphrRSd3kNGC7p3q17i5rSR", "5Cf18uw63TPsS8XZ2gHiQKzxPh7i5axu6knFfAXFDEUe", + "5cK8WPnW9Q7rfTynaHTGHXHNRyZxHHT1iDH5LyPeaSQe", "5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on", + "5dB4Ygb8Sf3Sssdxxrpbb4NFX9bMrYnieiz11Vr5xJkJ", "5dLMRyPWx6rdPGZpZ7uuZiqry96dUT5yz48u62Gzugi6", "5DsrdX4xPok2YNHUEtQsRuyAkDcdSBPXM74ezfRgy8Vm", "5EamRRDR1j78iE2Q1TUmoDQRw59m2GTs8QJWtnTZsKf8", @@ -170,74 +282,148 @@ solana_sdk::pubkeys!( "5fcmYHLur87RDZDmdXi6jDEuWKVeg9KcNHBu1UNqPjHU", "5fnyGEnVu3nyMrUysGQLXz38QH51VNtmYGSA99197xCX", "5FPQXMJxXKJmuShQCiTRYPzXL9LBfgphprhXR54pr2eu", + "5gpRDdBffGa9quGE7hTPVCg9zVnHTS26qvbd12G5kSS2", "5H3sMCaSJdN2k1hyuFTzq2BrZHUq7CinTa82hJS6EDTf", "5hwtqBGMzoat2a7dzaPXMLKNPZUcsFm2jAko8Kx6tJLk", "5idH3j6ugKTqSx6WHZax9LmVyu3MQ3sy1zsNqPv9mEgh", + "5isoKqxB8G3CVngTkrHddmvjHhuKBiYZwLfWDufWZtwU", "5jAKgxnCLVrb5zdDxjnRotwNirVG26Set4ZZ6BWC6Sx", "5jLw8DGMmjwaCJWbkT3dksXVEdWrXzQtiBd2TfsF1J1H", + "5jQqKbCAeYLiKK4WqppHhKBxe4DzDZMRLLaDhDQJ19F9", + "5Kev1Y8njZLiybgnqTpTnjZ2H6NMtCeSK6J9TeqhyZnL", 
"5KFXF9DS2ETQVfUTAfMygw6LNbqiXWXYr4y2k1kYr9bA", + "5KG9uYHFKSmJVgvXys4dKkZ1iVzmsHxDJWP1SsAw9ahj", + "5KK7GDAws7uYezSUcugdVrWNrKNA9ooP4t57Jq5W1mTa", "5KZRD6hDCZd1dgM9rJLweFFRkHu3qAgBhHgQ96Wz1VSn", "5m28zJcp7CsTrH2szyNQhygvDis3dPwbgrtYsWi3J4jN", "5Mbpdczvb4nSC33AWXmh6wmDxSZpGRANNcZypdPSGv9y", "5MGfrpdVsyifhn2x62j6PnBWQk2c5xTUK1o8KFrwPLWG", + "5MJU7ekXcjCf2sQq8BHUo99wxnPC2w96AizSUMhWZZeP", + "5MNLjn4p1bNUMRc7YP3rEWB5BQbzNsHYaqmQLwshAndB", "5mwyVeNzQx6CGnNSN6jPMFdui9LDvmFQBHcpHNNvDrMc", "5NH47Zk9NAzfbtqNpUtn8CQgNZeZE88aa2NRpfe7DyTD", "5NLjk9HANo3C9kRfxu63h2vZUD1cER2LacWD7idoJtKF", "5NorYZBbtgbouD3toX3761ZGbaYTWrNSDNci4G4zV8eo", "5nR5ktqmZufaVuK8N8nNoqVrQqopL6qAnf7YNvsjynhz", "5nVDe1R4QW8XcaWrDUo88tG1V8CgAV2BqWpCX4mF49TE", + "5nvj4tHGRCRFmTaJfpjx3RUcNPtHv7dDkxMbc3yF8UGP", + "5o2kjsEZDYnWGfTqBJdrBnRYKvRy7wjrniivKwFqyTsB", + "5oBPhPGrCTHta55o8XybDBNTk4KAdAKgaSZmTDdPFpMH", "5ogMBk74DTpRaEahTtBrrsFN5mcZ2cfmZfPsJMhJm31t", + "5oR5dh1WTi7ACiq8bdYmQN84kDG4HDQuX6cjyJErgGz4", "5oVky3o3pNbZfWndUBJbxH82ZDqaUx7k1CorxfisKWZt", "5p3Y7UV2oZrSTTSLJzJknEzqQpetmk2NB2hQEKPc43dC", "5PLDu8auwqtMkHW9zdPsfUcvyESZ45umFc4r8cWUw3Zp", + "5pzqyoXaZT6ADEGbiziPL3qTfvSA1XuP9ZLXpYijp82K", + "5QhhX9AD3PSKP6eZHLYdFGaWuyXMM3b61vF949edSrRE", "5qsT9h2TuTMPLtX6gcD2DG6mcZwechxmWhKRFHbCt6Pu", + "5qsTBZQPAPYsCBw9aPC6wCLpyPua7VmK9yFWk8gLQaUP", "5rxRt2GVpSUFJTqQ5E4urqJCDbcBPakb46t6URyxQ5Za", "5SAMpCcejTXQMnbrtkNv6nSxqaYgjRbk733QNzc4teJC", "5sjVVuHD9wgBgXDEWsPajQrJvdTPh9ed9MydCgmUVsec", "5sjXEuFCerACmhdyhSmxGLD7TfvmXcg2XnPQP2o25kYT", + "5sLRnoek1tvdnJU4qjEGRwUmVNnMRUD3eiJVxPZ4zddD", + "5t5yxCvtHxCkDJCCrChBQ2hdcUrK61tr8L2QRHtbnpCY", "5TkrtJfHoX85sti8xSVvfggVV9SDvhjYjiXe9PqMJVN9", + "5TLrfzh1tXa4dPu13mYfaX1dB87Md1tzCCofes3VMNYx", "5TZbMUkDaxxbyhkpgMQHZQCyvHAmsg9ZyDHf4R26qrap", "5UBdwqhphJ54qRFg6G9iRZzuaSmbFmwy2KyE28JMLbYU", "5ueaf3XmwPAqk92VvUvQfFvwY1XycV4ZFoznxffUz3Hh", + "5unroM4ZHe4ysnprhGrsHBUMsCbkfAHU1Z4rMtosbL26", "5uTcsQSrUffYo6RYSWj75SuGMkJ4v9x5RYuQoTc5aWGR", + "5uy7h23zmpVvG4QnFV91ATJzW7PziYu13i2BNev2n1dL", 
"5vaCfp7UEpW5qdJYyVH4m93oMzzyzTqXdbr7xLGobY8q", "5vdpdDS5vvUrPTpGq8zDWmirYheKHq8RWrQfUrbarN29", "5vfvM4qv8UERxSU4qjKhcyJYgfvBwxM3zotkbyXg5z4z", "5vKzPeQeveU8qnvgaECkdVdBks6MxTWPWe48ZMeC6fdg", + "5vtNLiiVi7N3WDPeHtaJu4CA1zyXBrdmC41XZktxDZ5d", "5vxoRv2P12q4K4cWPCJkvPjg6jYnuCYxzF3juJZJiwba", + "5wf29x6Ws3NJRLxrE27cB5xi5ia8DAo3vNyKpX5Yj4dK", + "5WhrU6gqgCwNBW7tkGsAZTB5bno3ymHVrmQb5yyxexBP", + "5wsN9Q4XLXvxjefK2tszV1z8DRKSXyGo2NxvzrftnDQZ", + "5Y7Rq8DBLwmDGgAUPKXyqJ57mRC33krMyH9dzMpuwTxF", + "5yqmdjMVX9F64YuE97neemY6Q1s4MgVaBbJiz9g5qGiC", + "5Za8eDus559NMWtNxwpWFqW4cNBuuVN6JRSCiRqdXhSn", + "5ZimkW45n4mWVCqXsqEEJuJWvhoqZFX7iRBz9jtHW3PQ", + "5zRUbp1Dtu3qQaRVf36oMDaeH91D2ePnc5DEgnh1ivFg", + "63KsHFi8cZ8mYPAa3dRycoDBV1cds3UNGTEijLyf23VA", + "64vN9NTyfwP6J4iTVgQcJKjXmJ4jh5xp1NB61xugEtmm", "67VDb2iEdx6XjCfBLXhUgKQQjTuLe9X2eLqTq5nBjUTy", + "688vLxT7Gsb4YX9YotViUauLC5aYbnjm1SQtaEQUKitf", + "68qujN79HiknCPBbGESncjUDeC8V42DigCGjQjpaVher", + "68ZxNmvRkaLLeNdqkKWKKSCmFiF28Zcy4xVHN5QZuzY3", + "696mZGRLiSvLhMCSTHRrmmgUvxPEjvBBdETDL4a3PGLx", "69k73WLdHRge7E3vCUiDx7Dkm1DQSBBGAu9FqNj4AeJD", + "69y24KUYmXFY2N6BMfzL8TfiKjQtBNCCjtnju7bxh4zG", "6AaA8HJGpYK9RDN5NQjDJfHPcqX63hnw3NXEa9rTXbEs", + "6au2pU33RmTdpoZ9WcYrHnTmTByMJbMMPmZPC7Z454hP", "6AzAaGnhw5E9Nbkk4e7uhRFEtTVKPYwxWsLhVL8wPDuU", + "6BeAn53Zti3ke5zRm92YzwaaZU5SzMpCmTgkSR7vE1mC", "6bkTEGxMJf18WBT7QDjs39aEEmN39mQFFQfihyQ2yJkU", + "6C1mHAPxQACd8NNS1D9KpGxqSRUz5s6itsaJx1uteofx", "6C2zZ5hac7V9KzdwoGeRFjVpAgPPbXvxGrcRtq2Pf2CG", + "6D6puBzRwMwVNZUuEipFycFL7xZgL9sPEnj5p68Tn8iP", "6DmskQV9ricrKpuGuHzyWmdsJcHPauvbPALdbsJxM9GB", "6Dr57RWT2ctMt2XiQxj9Nec5mBrfjucfAyh8hWQE9cp9", "6dr7c5k6SsFRFfmoNqADxZQsvPjPjg4meeEHVX8cn6HU", + "6DrFc9AuBwCe9VUSArhP1bfYQRSYie66pkH6C7wyw5uv", "6E5NygCNcfyPHkLbHMckzF25cgQoxN3DfMqH9bwyQRpf", + "6EfiVm1bAo8yWgZppb5irTqciv5VC2eoNTFToST5c6Mg", + "6eWWU6wZ4LA2q25v11VkVtD311MruJRzzp6cc8szqW4E", + "6F16m2H44H4LHseHDuk67k2zdEXCWQdGnA2BQ4yMQFMX", + "6fr7ptNzTzzgcqmBDT1PJt3RTnuTnM4v7jfFS3zaaopa", + 
"6FTLATh7CDdqkFyYJuTR7oFyvhVK6UHUK92fELg2mRno", "6FWhS2CHjtCf81GMsqHRXQqDUh3UKyyWGF15QGCWWb7Q", - "6Kwr8fUZPmSFNWaXfRL7e7v38itt276DFVu7RiYn8oW5", + "6g7urUx43pwjUZ9CBD9c76oLQtpHCgCxp9hQhv6RUMB", + "6giEzjcXWwiodVL48LtoFexax73cBorvq4NM8a2xUkd8", + "6hvLbQ32BYawSQobBJ1Sjspb7BBkQ5ueUbh6H7eSexYi", "6j7DvYDyFTdrK99apFuuT8w2WaeaezfwLDLk8Em8sB2m", + "6JaKmYstgSj55SwwQDihDq1Q251FrL5ev8XNiqFSP2qe", "6jhfZV63yGmPM4djkHYNZ5eTxmuMxmi55UkJBDeMjUqL", + "6JUvAc4NV51SfX8G9zwoRptU6hw1eC3fYz443Mh3Qj7w", + "6KnzXAhpE6ki8GuNQBqpHsVdHhsyg5csChPGLdkTHRvW", "6Ku6Cj3Y3FETU6JEuwjpLLu65CnKhi5YGtKdUTRGud7i", + "6Kwr8fUZPmSFNWaXfRL7e7v38itt276DFVu7RiYn8oW5", "6m8LGKXMT5QrRQdQsQAd2VHpYwJZebbrc48WgkPWeRYc", + "6mAuJ3xKzgo4zSxoi3s4uhaSLh9qqtyZQBhfhJbSaPQJ", "6MEyD2RCk74jqYDrmuoDqor7KQca8B3pyRDw91ynmgki", "6Mi3Q2eNUCess79xZJwGMhYYwzNo5Tr24Xbw3QhEWyFt", + "6NDU8PkeQhH8DF5Yw1Cn1AexQoQLnqr13GhNuQL1gfuT", "6nrkRvzUpTst8teZJawMFFHrmixJ2sxAUxPKrqoGwCB8", + "6oB8HATu5ApWMWhFpE4Ms5XMNcKmjk83VpcND5U1vHof", "6p9f4oyPCR3iyAW69DjndFuggRt6KFp22RVaywthff9W", "6PdBqw4p1iaNd3CYg18THHpzDBuophRUk3qSFy3KNTuD", + "6pJAzQhw3MJBbR4BPzhWJk2Hf5p7idivRrepCuh1BrEu", + "6pjd2Dfsv7FNkNDCGhzb8vn1DEmvPSVicdQxvGKLVQwQ", "6PkpXpQeLMp45TC1PTPUhCpywcCxMcmUXvH3dNPMXQvo", "6PojfJo3DkFNHsybGRQr3E7DmtUPvnnUdeFFgN2MBkKE", + "6pUayMw7LVx31eA86LAxomnzqktGX4rTLvDzznRHDuNh", "6PwxMMGLFnAf9sjMHfVr15z9fntjYTNPxJ7gFhkFxkXi", + "6PYMaoJf89uNKjUPyf1eUh6KQ8vGAHt9Fb8EK5SqctKK", + "6Q62YX8UpQKABG1FANUsTjJJdrfYZNEAbZyceuaVbx79", "6qJPxxgZHCQKBvbGC9zCsuuPtHMMLszVCoiCvEhVULyJ", + "6qPPKb2zC6U9g8pwAGrYJxy9B9noYiKxwS7NnuRPqpUx", + "6QyznogeoYxn9VLZgijxfkFeEusTu8ip7FWHXwGnH1Bt", + "6R2SsTxEK89a9m84Z666c7M7wGcmbwNmTCyPFcAcftyX", "6rBEm4eWXATTETo1ncXYiAJBLwh1VtMBiRwMLRY75yR7", + "6rDzQVov7rYNcHSGsVcbQny7VYCinkn4U86Cfz8xYQdC", + "6smeNG6M7Aers4Ju1drfZPYBS4WFK89EwWphQjKTMQSj", + "6sSBHSuyRRphvkH4GAwccGRB8HdZLWC9VENN3c6S39sd", "6t8zWy766tsHBVNxhMwsTGiEYkGtjaZncRU3vcSEYtHU", "6TkKqq15wXjqEjNg9zqTKADwuVATR9dW3rkNnsYme1ea", 
"6tptWLfq3o2Q5M74ZJLpGGpA9jCAScHSaun5aQVTtp1h", + "6UrGCcP3H5REdZrPx9X22s8Pj7q2RzUWVT5LFCLBevZ9", + "6UynSxu2fiY5qU6Ae8cPLxq4jyWVpnr7o4fUyWLxCpcp", + "6V9ymEmZyvpJFpTzMAaSxs45QpoSvNUQRczhBF8mWKXt", "6vx5vGgqAa9dRaJpbViCNDjzxp6EyGV38YMYbNDqTzLr", "6vZuaLY4n4GP9DVroymfZ4D1oP6xpgF1ExLMqHQbt32L", + "6w1jYS7vrmprS1u9cQd9uFo58AZYvJ9JtzihmkRPgSz7", "6W2xi4iCGU8eTMCGtG3DQXgMGurXFnd5iVXCY5Sq7AbF", + "6W3xBXKnq4vGHvBjMNSgVviQ6vqDeWiL4LwnSFjvr8Yo", + "6w8Gxzq1AusnWxrnBH49wkWVemp7MPxXftfyUQy67yJZ", + "6WHCTDvSa47muoyi5zHoKKPcodkftixsauEfDNB9YSjL", "6WwCWBHYvNXnDswu6qrHbKoXMqtB1ZwRCD2U3oqWbZmB", + "6XSXMyUJnjTR5Az6jQyX4o9bjMXnyt94u5MvAtmrH3dZ", "6YpwLjgXcMWAj29govWQr87kaAGKS7CnoqWsEDJE4h8P", "6zCt5z72rfN9sRk2hTgc1LeFDbEBfXYmW6xtSNmgyama", "6zdz3xCHLqu9uruKskX24eztqWUchf4P8YjY3pMnwCY9", @@ -246,182 +432,333 @@ solana_sdk::pubkeys!( "71bhKKL89U3dNHzuZVZ7KarqV6XtHEgjXjvJTsguD11B", "721wicnref9s6HiU1ZWmYTWp3ZRonzHWRxAPSc2Ceu9K", "731Lnc3mbXpquV8FmFnTL8EE36uoZRiDMUKXehwZq8x2", + "73YGZpfTSBv7PBLvmgcAwa7K2fAaBoGe2Z9YNWz7J4rB", + "74TbcQoVmGdmdZdUZTEpMaLonAtKTK29GsZpfn1LWxoo", "74U9LiSPv2gb8bBZSh5uVNf89W4wZ8zs9B8EvRVVwr87", "75ApR5PzaMtcx2DhzZvkj3MDRQWsDCjcuEzWsygboVix", "75yzn7njq32yY1ieCZxFNVFZQWtEbQHpaaG6dSZFfmX5", + "779hnWTRpb2wSdqFjduzxeyPk41hqSaassmwPjQDGMnd", "77uXenX1Y9T2D1pcnHnYsYiwTTHbnzkyrKX5fQFMGVCR", "787PK2WaCUZCyYEmuYQSGmoxu7MyqK1usn43FfiVwhcB", + "78QcjcDqBqvxrjLZVH3Y7vmyCmNdu7VSVnHGMiH4CpcR", + "79SdgYawG2AHZWRTkvSzTn2DLEznQ2WrBxvi68JMFup7", + "7A4WJBegWKXVMhVoKshm4GzjW3Pb9od9ECWxF5DbrSZu", "7AbNcvhBBHeL3LFhULonjidYSKKaZzyoiM6dDv8zQpjo", + "7aEnmY6hs4dNHpqtMyE7ACVaMaUuiRxJCJzhWMTCUq6V", + "7ajm6amGXayr8qe3nPYA6j1bPMLMCxEmhNdzgz1EjnW4", + "7aKHeoUDCYbEYdSEj63i9m6vmkXLbiafWxoCvyhcQtPw", + "7aKL2G6UMcXWXrvajhs3SxNpBh4gYxgWjT1HBvr36MEK", "7arfejY2YxX9QrmzHrhu3rG3HofjMqKtfBzQLf8s3Wop", "7AW5VGSNcaECGKJD2C4rpRuWpcT4kdAHrbahc7KFQM3p", "7bFx5g3sh5CqupFYtch3J1RdZBZs29HtpXAWyPPyptB3", + "7BncGLSgSexiAXz1dRB7cZEDdkKey2sK5xiLpHESDjpf", 
"7BS1RfipQ7zwuKAdiUX5CNFCKNEdk82TN2C3CmoXR4ux", + "7bVEnUUkMX6EWG8CXCuqPSzgEfhnQarAgiwzxPSQqPQj", "7C3FrWyhFGc75WgccpnpuuCRSqpZiWpvj6d7U7jScSKU", "7Cj9XfthjKp4KxccS8tV5RmjZtucBuyyXJDRovLkyFmS", "7cY1beonNGzrqUk4pNWErm2vYcyw5yyLqwnrEHr6iKmu", + "7D5N7KZFN9hCcc6HmDi22JQaGwozaD33MQQbrkNwG7mK", "7dEjSFnrm66CJ7Aj5mC1hsYmMzmGgWPr6iZNhcvANZ1w", "7DU3QwALuHzaDU5YVWK1BgeFaPo4TJ7QPAifNHCHBDBC", + "7DyCSDDKvRe1BdxSyN6Q3bvW72VddJbJXG36Ghi8KRcZ", + "7ED12uoR6C3mr7Apf2x7YnSmEHkApFo4Jfm1bq8i6L4o", + "7ETjs9tfe3snSKSnKqzxJJHmpNTT474TfcYG8MSQnuet", "7EucomZSKvQdiZLvra8hLszL1kRYiGewxyMJnyyzdbH7", "7F2vcJca5ewzdJUNcVMKCLVYneq6CX9JFMH1U7JeVG5", "7FnrBgjPb1y8PNjzRLihQWUvky37F7wkvRb7MUL89Q8P", + "7fv6zGstESoyWYdrfeW1DzN4fabJm3M2mRUVid6bx4EY", + "7FVCgatxKrX34VwM4YRhUVdXsJAoB5Kk3EGWW5M2Nqub", + "7gEoBVoj9bAsuugEwoCzjJvVh2h2kNd31zmWLbPM1D6h", "7hAddyJcvQAS6SsfRKLJzYPuq4h1XykRSJEUmr64p8oF", "7hifPeGJ4YFHh935XngjGuv67PHruoBxiqeALMqqjDnz", + "7HSAu6Q5LAqrCk7pt649utsDrrEd7yP5NEcqodFd8TTb", "7JFfCpPEodnt6SWY41ePBRXR6LUGiKhLSKJNw9ZYjdah", "7K32uTNK2zJwp5WTt4t57qJMf1JnHBq2HcSkc4oV5sQb", "7kmwiz4wbzf1kUSZmKKzaJRxybGeDMLSqhR9s2FebhoY", + "7m3rgSgyS4HXnBAc5F8tPY9PTXgB5wLNz4xC8b6XA19z", "7mmRxJNttYcJVNsJmiLjTHYmNDt32EpzUSgLDoBsTKKK", "7NPcRcHu3jACoQf54nkRBLgdn7zBbUYhnsdC4VHqBQwK", + "7oQ4anNmvJmXUXu6pkXFb5fmovuPSdWzbRkwMBvHi4yf", "7qFWjQLAYeSGf7MBFo9LUYEiFN4LpMagwFu4MwAUueVn", "7RTTKCWBJ2XwtSHkUfpwBTH7SsdKqHrWfnD9Dv4z2Wyw", "7RUobwC33EbHaWWR2sbdaJhT8x8PpgUoAbJQYrQqrSgQ", + "7S1xGwMrB4x5fwhchayjHojKQoCWZsd5HnRHRUJGXekR", "7scarR3Z5obfefZr8bPKYoMNipua43K35AJAc1YchQBK", + "7sdh5QHFPo4ktG9SVTPM7Sek1WLZpwxNNHudojYi8dK6", "7sEpbQB3Dryn5JhQVCGWoGgUfYwNEZzjPNa1Tu9mVa5p", "7suRNpX7bJsXphHJtBv4ZsLjJZ1dTGeX256pLqJZdEAm", "7T5ZekSsBSgLNKVzQmCRQ5iqL5ycprREa1tz3GYmb4eT", "7TcmJn12spW6KQJp4fvvo45d1hpxS8EnLjKMxihtNZ1V", "7TG3LLqWYn8ybpqAJaiop1bVb5mPWJSDhxPaLzUdF3M2", + "7trWtWjH3cGfSu8z6MgkqEEuCJWN5NhRZBYvbT841Yi5", + "7urBmScRfdSH9CpQ2SAwfmvGXp59nTDx6Bw16USJVvGa", + "7UUFbQSderHWPqu6BoezL27ymsgrBbXSi3qQHAozwDtP", 
+ "7UZAaZTjnsFMze3RWtzpxTG1CiJenrvPixvVxW5xSicN", + "7vu7Q2d4uu9V4xnySHXieeyWvoNh37321kqTd2ATuoj6", + "7VV8eZcVAN79xoGL2eEAj5sXVbQEsiqiTCZcbjisjXUx", + "7VVYonADe1jj2LtKZMfTKNPiK7gjVRDsX7dvA4BXf9sc", + "7WgNDqtFHr1hLYo8wcw8X5uCnGwDSYQMz7MMKL6dyLLt", + "7WL3m8c51RELcLyq5AbEA4jzfdiXerGym3DsVH5LBgua", + "7wsxae1rHhA7x1329kfhGKzukq4Ujhw9D241ziBxdKY7", + "7x29aMXJ3kxxTXeU7ur7NpLFWCmedz7LFVo2oUqYA7tY", "7X3csFXUN2AZph83GC2FZCpkCTZXfVWssaJ72cpwG96w", + "7xhwT3FQqW88unUPfVQat5F3koUTPSujfwaT41bt51qc", "7yiFPLeyH2B2p3xGrr2Y5nmk8B52nEaa6j55Nk3u6648", "7yzVecfpWupdJwVQby3inMggGSotFSnSrhvPGPp6JGdU", + "7za9eW1e6a1zZ7RLCmE8uGJoNZ9xCC774QXM55pmUcLm", "836riBS2E6qfxjnrTkQdzD1JkFAoQDyUjTmzi38Gg84w", + "83PWQUxkBDrTJmJeFL8VUah6BK4p1JPGdXVuJC9Vf2Pk", "84BoHfEzq1FccYEMZPeuTwc68DQC7LS6Bm5dRMsKypA5", "84GZWtzfKYX1yfstmjA9eUEp3RnWys8DmsPjsd1ay7wv", "8641M19beXr6FB4zaf6GPYdLaV695xikBLYFYTVEBZdm", "86hxQGfre1vvVsYP7jPBka8ySrECZQewjG3MfxoCoBYf", "87VQhN7dUfS9wacre7vqRm561bNUk5PwUB8xmroc2yEw", + "88ms3Y6Z3pNaMrYY4zdUwHp5K12csjNeffomBnAdyaBr", "896tvc8WPdR33Q5XYZRxPQkRZaqUv4Mtr1kvFhYPFao1", + "89vXkL62kciXTe93joRCGiP6xFCHrNxYGJbshoWoTut8", "8a3rCvLRcSMGqQvzoVcHYuJptd2o9sK2rt2CKfTtbGnE", + "8AgqfNWYTzmtoxRAvqFB39Z5kdhoW6BV9hYjW8Rs8NvF", "8ASvX43WvDF4LHe2ioNUbn3A4ZRD9iC6rsxyjo5sVGLp", "8aZ5CJf9qYnQtT2XYuDKASj2pCiPwhWoNsc2rBqc9s1n", "8aZtHhTNFhVWp4fV3dUfBwsKKBjqzHDwpTZRbpeqo7vo", "8bn2BhTzfNEQoMyEPJ2q58hFzQY8GxPkLRFWHMWksLet", "8Bp1GmdCguUrbJbrhFDL11ZddgC83cPcbKEUbPPPmxsj", + "8caQuNVnmywtQnKWv6j8MzzJ8mrLwJkeGcKEtkQkoFZA", + "8Ce22R38MddAZSpEhLC38BqUEzVAcZh7h9MgfVCWibN3", + "8cRQ182d1Bai4z6BzPBUk4WsByphHgnYwWQ6aju3FeNh", "8dHEsm9aLBt7q6zu3ESfRXkS2eCwkbbzzynfd2QxDzms", "8Di25FopYs1crYkwwrwuVJhuEkGj2rSCjcGXyZeBGttK", "8diJdQj3y4QbkjfnXr95SXoktiJ1ad965ZkeFsmutfyz", + "8E9KWWqX1JMNu1YC3NptLA6M8cGqWRTccrF6T1FDnYRJ", + "8EN5Zfv3gX46oAguTj2Gp1b8rmmL2mocsHkT2QfgorVk", + "8EqtKHaSgPskksNFSC8oWzSMT2mdSMMtNjGZ7E3KHxSn", + "8faCuTioHxq7DYADQwQeAHaKXjqBzELCgUQBieXhmKGb", 
"8FaFEcUFgvJns6RAU4dso3aTm2qfzZMt2xXtSgCh3kn9", "8fLtWUfZSpAJk7h4XhvM6TqGjXQxiwzWkymxmGtJoGdu", "8FRFYPcwBan1KBKR6HuPy152L7pr3ePVYVxXXnWzPjEd", + "8GaMqVpXH7JuEs8D8bdXpe7ztUasAP3wdEXpyZZbUJeb", + "8GpsptdhGCGybKqEw19pVBZg3gMaopiKtRMVzJFBddfB", + "8guxGZ3yR7L2pBtXgoBnPpq2RE4GM5qvK8UaMG5YXds7", "8hD1AVXPYCCrSfZGQKc56Lt6zuSLrFxejLFRT1SN2oXC", "8HkuMwe42b1W1xvxhLoBGgtvM49FtxTFjk4JgDpLpbMq", "8hpUJeGB6BF1JTZcbiNEgw9w9fdQ8dEi8jF4ohapsq3h", "8hzgFMZG4WqaaMZ9H84J9fvcxkYDPYnTDFznx9mxHWhP", + "8igp2RrQ1F4drmXGpV8qNyJL25Aom31jAGJ55avPZLc7", "8iorF4s4S4NYYEMwGNJZqBbKoMePdoyYyrozanasEWyZ", "8itTkbGjHRAx3cum5TD7bXaubmEFGxmKxqe6STrVqLdy", "8iZ1Qk38z15xMW5ATSPbb42pC7FJdFj8NtbG7uosNdXF", + "8J4eQKseuoL1BT6rHSZHNG82U7gDorjy6JuqXqEQSswe", + "8jYnpEZcE9SUYPuaUXA4TMBWn57G1pPecRmT1fLssHqs", "8KKQ4QJ7JWAosHwL5pmjKpYWMNSxqtQjJVes2hQezNRQ", + "8KYQAb2TqCq4Tay6rLTVwWr6BfSMtSmn6E2qosc3xm1f", + "8L1k1DCCwRoZVEVYZcUzLht9SxUBhkNw9fU5PGnZfw9u", + "8LknwWtMatn1uRenrXYzJS7MxQJXZ245dqTuQiD7wZtq", + "8LkSKTrwqFgw1Knh4gc2BFYb98DnUJvQAxhT1BAFYh2p", "8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd", "8mK7gCU3DhAG3YQrSN2p3HDM9S9vSd6wddhHKsXyKHvv", + "8NEL8uj7R7ep4qce1BRAoHeETnCcAojXRxAjdzw2jRdv", "8nGsjSv92c2n35pPcxZUVytPTpD4WqnZ751MeH42whqZ", + "8NgvLoYGP7wyramK2gEzS4sj5UKpRVHZeTUSUvMPMna5", "8NndwQsrH4f6xF6DW1tt7ESMEJpKz346AGqURKMXcNhT", "8noQwzDhpb67yzfREDKvymKWtSdPZtbfjm3pxPYA4bag", "8nvD1CUE48WdcmRdvbyWcM5LdJKRTNP3tXT6Qp2CSND5", "8nvhPfZMet1yWpYzrEJcaXjc6AENvSC9AcwRrXJeMxhK", "8onJp7KyshoMcxVm5CemhPgGvA1hdSnHbcjLCvJidV8o", "8oRw7qpj6XgLGXYCDuNoTMCqoJnDd6A8LTpNyqApSfkA", + "8pf1LTFXYNmvB1esMKvqLq92KZaAt2ETe3pGNxqy2pc4", "8PTjAikKoAybKXcEPnDSoy8wSNNikUBJ1iKawJKQwXnB", "8pUJmSJo9WXWVz3uf9rUmvQAxnbuCF6gWNeebbjGdyD1", "8pWmLkuR3yio1Kcu1CqciTPmPMTiCf72h9n6Z1DmQNgk", + "8Q4HYJH7MfjsakrFG2F5skB9eFc5mpBL97iL7tfCjoPW", "8RsYRsi6f3hiK4EhyLS22Cy5KkrNbuidVYmsaYR1Xx78", + "8S4Xb96cH4sNrnKfMDHd4HR2bmjWbeUeo1o6yJC6ZGkY", + "8sJbSYEP7HtR1VGwobWNwrwFkjSMoPZU1hMkPzJoNApb", "8SQEcP4FaYQySktNQeyxF3w8pvArx3oMEh7fPrzkN9pu", 
"8t6UUXRkQTBpanRoMjxNxio1baXXkEdeLniCVJGMdzLJ", + "8TBZGt7C7QjyXia45cuaRp4rhQZ7zLTZXY5ktiu28U6V", "8tiCet13nwqVRtG1UbW5Lf4uuj33X16JnHPZssfvPXpE", "8tk7QMWkXBbzw9AJJtLkrdf8ZnEQMiWmgXx2prk4DoQv", + "8tSzNoKE2tHYdTpCQB4apHaes2YWhCjbo7J5XCv1ULZ1", "8uJR7fEaTiriYLe4YMFkeeADdRkexxm8jkFCGRjPBvJD", + "8uvRcrxAx5e6FRzLzobXupokSLwF31cPEkJV46LxyWuQ", "8Vvc6PNQVbGAXiHssGez6a9ovgu6eyEp8PiTijfzE7nX", "8wFK4fCAuDoAH1fsgou9yKZPqDMFtJUVoDdkZAAMuhyA", + "8WjzRz9HM2bWBopD6UPbi3swyRE5kk9aJojhn6fBKTJk", "8WqBgoVXkVggLVuvZuF5wP8taQpzTuKGoK6brU5s5Hh8", + "8xsMN2rQHdmZJ4T829PAYGU6hdUivRU8c8X7jH8zNqmg", "8XvoJswfYCxWf2WkUmNBjtDWwonmKugYhxBruNpSfS4a", "8ybtbfJ6rHeU49gtkQUBhAnaXBYGPdMk8dd4VCPmtbGz", + "8yHavC6E35srdiEfWMyQTbU3ahsVheYQpMLDGHzXfFqq", + "8yJz6fotCuDhpQ38Hn1qFyF9zigwF71ENtd9FwkMwwWW", "8yS3Zc45xptsaay9iaUSpfdb5gaKcQaKAShVvEUFKpeN", "8ZgmpBG5ixt4LVRQEK538hsKTsJBgmFFH5L6X5e9iPTd", "8zH1mRkic3WDpUkSgtq1geCXXh4CLVfLrEi2TEqdTgFS", "8ZjS3d1bQihC3p5voM8by2xi5PoBNxzTJtaQ9rvxUbbB", + "8zYLLHSU8URmRaAyWEY2H7uqUF63uezRCYpzFFkMG1AX", + "911tr1Hifn3z2opEsEEhxFQuJzp1YNM9QMkBQspJviWz", + "91K6thzfVGAQJZkdwEdMYDA7sWL3QJ2Bm3PRXHXkq44R", "92ZDWNRurKikxrCQcfR9jMMYmqWksgTvSFFJ2Pa5FsMv", + "93E7eWXX8pVKLSrbBx13VpvDtvSU5PJs464uPoty9VeK", + "93g68j8QB4ZWAtEbvL6kfy1X6k2izXosDiuCfPPPYdjx", + "94Pk8zSFvQTvrkwBkMEHzjufx53w3kX6MymDx2ayH45e", + "955xvVJ18xqHdshcMdvZLM246fVABJn7Q8DfSEpyCeLe", "97vF6NK1NgmvMunNw9QL6ne9wxzUQ5RLAJqWxmDSkKuH", + "999vPueFgE7LEjk8awARTr1MVN5MMCAhaMph31EHPwfn", "99NHmMDJeSo1AM8dg32nTokVRXByoJuA2gjDUDfiKHem", + "9A1JePcV94NNZSPvzWvRGqiHhPwh9PVGytV9HyEdxHbc", "9A7aYiEPK5ymzPjniabXofR7jyEyihGAANakUf5AALT7", "9a95Qsywtw2LKK9GkiR9dpTnoPGMVrLBLTAMPnaUwDjY", "9B4oF52Web2deG8jdbNbBuM8DjiFyLYM6CTid3dmQkQ6", "9c2aGPBPGbzw1yeweN1TvC24uEV5oUaGvWfFNJif6npa", + "9CjCwpFfvex43ZrxC8iW26y34PsRbDsF3Y5fnf9iQTdR", + "9CpQtpHJ7UrsT6R27RECtE4dWWBAVnTcCTXj5HkbGJQC", "9cZua5prTSEfednQQc9RkEPpbKDCh1AwnTzv3hE1eq3i", "9DBdgP1ggAWJULBJXBPMzWvA539BhGVgTuTfebXhfynG", 
"9dCMmPpfNuWKyZ2D1iRstMCc1rhr2DbHVFrZ9wFncQjp", + "9DgTEERummZyV6MVSTmC8A9ZULgnN5Yh7VHjP2PADrws", "9dSTVY7hXEJsqExDcD8vYMAZpJ5mt9HBMPwRe94nBwny", "9fMqL641B7nQZ1xktU35qXuFESxMB7pkqmqVANtovzmE", + "9GERkwr654jBUn8cvDydFwnTZ6v4MZbyvp9ZKhRep3wU", + "9GGe8sYSRWG2QSdf14V5cKT3wcFgU9C9114BAVVPvHwF", + "9GMmVYJBw5Cj58P8QtXtesyQUtA9GyecPb6kCki7QSo5", "9gmnbM2GUVXiTfCg1Pj3ZTJdqyKdS81kjBWwwnZbS4MR", + "9gUMvQ8peCVhxU8ut4eyfzyTZZmvBUVDWw3s492yWNYC", + "9hedZ9TnXRLHipwYnuD8DdyvAwE7sPs9qdqNwjWvV3YD", "9jAhC6dhjVqVA184dVczcBAar2GtXT7D7LwtXxLji3Re", "9JKQiQqWkkUKHqnR73MmZ3kdiqQt7d3bEvy81Y5rv6k9", + "9jpddNRkSJTpD5GJFXocmLsP8JUasJzpwgKrHrLtA8a3", + "9JvKbbmSH4T9MuHfpWmb5osoQ59dSnjXzWbS57N9r3bY", + "9KCFj7pL3hzyCzhgiy1Z9nMxT5mkNBgm2QjfbX4nXBPi", + "9KJyBBRfCt29mR21aP2NZHuyvZnf1VjSSB55WPExRgSJ", + "9kkpTAQfndU5SW5iVbG4j1qngoUh59Jwqndd3XpkBzzm", + "9kKpZomqGpNYRPa3A9o7s2SKZVeHKFCWGt3GdXxbbymR", "9kUAkfKvczyRJMn3cRz7SVnbotSdiTVyCFXkX6qeXmXC", "9mbQ9mrRjBGiUVnz9Tdf7PuAkWomw3GLZ6Sup3R93Gw8", "9me8oFZvWuc9cjBuXiW8YDGGZgkitk9GTKYTNCHaBQaF", "9miqenD7FrGa3a4NNP6ygmYbpxtcAmW3AukuTUbAgG59", + "9MRUTN19MtA1matBH4ddgpS14mPAdeCoFnsLkaLxFeBQ", + "9ne3MLzrzESX4fa7cSyRvFsS9EsNCVfPRDgAXD1RgVT2", "9nwweyAkLSXutqy13N54c4DMyvgehnkoa72EiwtnBqzB", + "9oG814Uhivn77HToA3V4M755B6Sthx6aXf6jDG7Bwjh6", "9oKrJ9iiEnCC7bewcRFbcdo4LKL2PhUEqcu8gH2eDbVM", + "9oWDUVn41kNZuVCQBr563sgbLXGvZULKuMr74w7NSkz3", "9Paysbs5evoh9BiWiS77NNutMCG9koUK2xyAsJm89Rfh", "9pHNBdibr5ukpX28foKK3UfCMeaB5GyAuGcHyJ5DmUAJ", "9PqR63RosK5siiSNvHtQMyEKr3CvJt1jh2qxoVmghhst", "9pZZWsvdWsYiWSrt13MrxCuSigDcKfBzmc58HBfoZuwn", "9Q8xe8KgzVf2tKwdXgjNaYdJwvChihmjhcHdae7c4jPb", "9qpfxCUAPyaYPchHgRGXmNDDhPiNmJR39Lfx4A49Uh1P", + "9qrjiQG33wuqBGd9eWBevemxuw7FkY5osCxwYQt6SmhU", "9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv", + "9R7NanMZerZE5kwyfZ4hxo3C5aCNB3x463i5XtLqVEQN", + "9rGfXDukY86MrUcxZNGq3nTrUaQiE917DMQ2EFW1cbDL", + "9RLnzRod7LWYb3nemb75vKhEBSsGqS1uHeuqh8Xuz9B2", + "9RUxQquaeSkuwb2qFqenPw63qXLypEwMwUVNaGHzDifF", + 
"9SHZFX3LEuL9dRpCgiETWfakZU1ZaXiw7aaeTzgkDzEJ", "9StH5W5oei8tNy8vrFeu2443Bz52Xr1w7NBmes1xnd2N", + "9SXpQRC2veMSkTRY1G2vLktNgc3Bbw4Nkg4xK1a1aVjH", "9TA34Aso9JfisCAsdqtpJ6cukxhDdqyE5xSYYvxpCM6H", + "9tbzUabDi5D62Kkpd6oQs9r28Ts7TFJHLvx3pFJshZRA", "9tJ8BjHCSYxVMJZNvFLRJj39QPssKoTc2h7Fev9xXTPD", "9U4fqWRd3kcUHEX2jt1kFwF2dSXLnz9RA6B9W656Skbv", - "9uEazQxpRTyYX1hHMMySDBWad7zb54K9PkHKeZemK2m7", "9UfKWtaruM2whJNqLLcrxKrSuS3VcVssdbTyNvfQCUpg", + "9US59KW8j31mxr1opP4fbg2j86b2p88DDKhcSeyDznnA", + "9v7E6oEm1V86hjTubtBon7cRYPvQriWZKHZEX6j92Po4", "9wAkySUz2KihVRMUs86DcTNqPqtdNYVHMwFxoH7xbbiM", "9waRyqWAoP68etU17DdWamgpTnPb3skY7Er9kRZMzCfS", + "9wSnV8sujWbGrg6PL9uoRLGPiWRL33VMqHBMcLYeCAfh", + "9WXjR7Ea8hKt6Z84EGENQvGR3rFsovcxDYu61TJFcWJ", + "9X1qjnyb5CfMkGfEnuRZS3G58iyzbNZCp27RpiRVAiV7", "9xe4rcxYUe6iADdnvLkWn8K26bvyWgfrp9HYbtwR2sPs", + "9YHpZqGdwED2uAxZbgixESvavajvuHyVZJKbVBevjitB", + "9Yp7sEu3ecy31pKgQkCxrUWMsXiorGsCmxPG8FNwnFuN", "9yTy5duSRgxznU3mAfQp29qu7Q6sEQ5B54CedveiM8ru", + "9YVpEeZf8uBoUtzCFC6SSFDDqPt16uKFubNhLvGxeUDy", "9zkU8suQBdhZVax2DSGNAnyEhEzfEELvA25CJhy5uwnW", + "9Zqcgqref1GnwwPNWcXaK88qib5hKqRMaoQ4257tvBpG", "9ZTaH1QLsw9dcdkG2ZbtdbfzrNVyjmM3Vqxp16PDU8yX", + "A1ieLrRfZyrRQ64RoGVyVQ6zqRhnQKQutm6kRGRPg6ma", "A1voPbfnmCq8UBNQTBKnZ3Xbhs2x4cS2Gx2b2wJtqCh1", "A2wYfDhhcoEvm3M3oehAtFdcwdZcxWvWAnk4yrpzbSS9", "A3zoxWHVyqHui8y3Z4rKyqWJTyr78tusgAEpAtr4ZEfg", + "A4Bz67GutEFuHpoLLqfvqjU4PgwKkff4uNjEXXUomm6z", + "A4Kg15NX9i72WeQiH3Gp4u6QceScodz7CrkVdD2xhtws", + "A4xoiWbs1GmkV4p4PXkBZWM5UDfJqXx8z2sDmHP8FmG3", "A5hMwgm8QfooAuCMw9Rw2S9vXbBwCknFMhhUwKKHvYeJ", "A5mV39Cb5d6gbrssHvaYjGuurWpUnRGfMMtST5EKxx5m", "A5oH9BPo6PRnEHmLnhtyN2YXELnbTotEUDB8axHVBcY4", "A5TXyVrR7WwfNf2RjoN1W4Dw5CuuMDiLV9e77pWhmwAP", + "A6RXanjfgm9ivaGUFvjDHeSAe6BXYgJsX58UpiNF7TXe", + "A7zCq95mtG2enn2zWxNyVDvhU2EsH8T9oWHs6jV3rtCH", "A8Lv2ZPKKSBFiAiepFsmCBvWEBSVGzuKxSLVt9z62Bqt", "A91g1Y8xXFEvCGg9afjTn222JDuY7iSVmSg4fdbQEB21", "A9CwddX4BA8AgPCmcHKAEZU4JDFRzruMFytr9oo5ZzPv", + 
"A9qeyUzZoNXJQPe3fd3QgDujekiLg9Fd4VLX9UsSzAn6", "A9XUvhm5yKVs9Z3tYdyiAYRx9mNr2rqnv2VkY8D1N4uZ", "AbF88hkkpZ28VaT3vYn4xu5CeNC8G6Dq9cc8ciRR4fY5", + "AbnagVJhwwM4wDuZbvoxWeofdpSWoDMhcmZCdCrxtCkN", + "ABUhDLm3Y8HyLsmua9Xj9on87RyiEsw5j5eVVZQVw1hT", + "ABZonS7EB42XZFg66md9KnU6Tb576jGvxu9AxA4fFuQz", "AcjhWohnu7vYMdu4Yha63XZupqMKVVnrWmt1F57ScXhG", "ACPgwKgncgFAm8goFj4dJ5e5mcH3tRy646f7zYPaWEzc", "ACv5dTk7THbmUpHYGhgPzMhWr7oqHSkuPJpa5RfvmG5H", + "ADCqmaUWXbShzh8GJN1WeaUzy16y2yYEdodPDcru2RUN", + "AdKv3SFdhSZDfjUVNKmLAkWE4NPa9sudvkJ7HG1WYm9i", + "ADriSmPTSeyKwNCo3geTcAY31G94mHmCfRfrJMe3DmbV", + "AdVKEVMZSd6VZ53PYbw3PSaj4XzDjsNoEg9LwDnyWRE8", "AEPNDgaApdcfZEZpww458Az9i2NZrwxsVCdiUih4EaRh", + "AF3h2gdkGYndVj8W9qQN8jA45kQ5RB2WmoAQN2iBk37c", "Ag51oCkwGy5rkbGEYrcP9GDjvFGMJrEdLxvedLSTSR15", + "AGCsyz64NLvoDAG7Mi7k3WFbkMjRDCv158Q99WGGvKNM", "AgFQkQe2Em2GUkDD85qPmHrvybnaXKMa7anSNdCunnM4", "Ah5arzkbkHTMkzUaD5DiCAC1rzxqPgyQDFTnw8Krwz1V", + "AhT4yWiSg7nnEWQokWoGDz9QPURwa9sEHrPkidC2PK26", "AicKhNhJmkdqafRDjKLPgVqLzXLzJ8pS6aVrYrRkq1iq", + "AiPN5MwTHxRjG4eTQ1nrmxERRj4oXJURHPiTcNpVYcmk", "AiWqv1dqsbvkUMec7G4DmM88ka7SaoqDPkn5U2iuvqnn", "Aj6cb5bk1JbCYSHzNrV7xCzECWZY6Ys3VRTNw6vx2XWp", + "AjTrfjYY2SiTC5pLJXwNXpcP8q549YQ9VrPAxzjqjUaW", + "AK5hfHFusiS2y5cjqZkiyUyAvH5qfidQgrmCccENnet5", "AK7ZZx2sdo39coZN5FsPdae2xNGqVKHX2TWJixmY4ecX", "AkkJv1meyo2Ax2XTXEXWpvHTh4F8a68Lja5dx3TaX47K", "AKqueA5Vfmf6BWTXuPdWxrYCDNPGi5gDLrNpdc1CSEzy", "AkVMbJq8pqKEe87uFaxjmt35tX2cNhUJTJwv13iioHu7", + "ALPXVb1A7C8EkR7NuKy16pXcBRasdeRNmRPnWGQHpe7j", + "ALzqkbSgVaQz9nn5xh1BtEsey57otKRyGmaSLhwphYSn", "AM6BNu2WZibZhYYHNo9ZWxmEAB7PhjNQBGKAhhN2VrFt", "AmNRXJSSaGJrXaYBahD1PBoBR2ApkfLBAJ7SiRK4772R", "AnMbQV5XNUEwC8HYX5wGkfmM5vo6eCkFUH29PvjtUHWM", "Aom2EwxRjtcCZBDwqvaZiEZDPgmw4AsPQGVrsLa2srCg", "AoUwfPuiEek2thVRDhMP7HbQb9rguyab4rDiz2NAfwwA", + "AP2ZiF3mdoDsVd9AdzJWRUb5UHHoQjnLLPXn8rrkERGc", + "APjVTcfzJSzYEkBddGFN1mtFWb8jDzogpgUz99tQW3Ei", + "Aq8yWGbM9uA25KDKsU9KPwoPEuquP5vTqrYomh8VK9XL", + 
"Aqh2c1x2AA59pek7pz8PymDXzq62qmNiQ5GXhpWq3rNr", "AqhSZXj5TePD1E7d7VrQ5JhBdoa67Kz87kLQXPcfbwaw", + "AR5Lgk9sgoz69qGBeeTiMyyxZdhvCi2qkD3XUzre1Uvh", "ArpeD4LKYgza1o6aR5xNTQX3hxeik8URxWNQVpA8wirV", "AS3rwVs9WR8HTzN7GA4aLBs3JjWjt1yKHfSzmwoqp2bY", "AsvCrBKwz9Lj44Tp5zkhZabzR4bxE3HLTobrRRGQozcn", "AT2N17bBBtTAu6ombzhiLNLc8JinjMXmGMzFbxt6AvwC", "AtHoh3UapTWEBsygTDXNQgphTHcqTj9g4RFpbvPTBDND", + "ATzLEsaydfvFtNBxu71zBVpLAtdszhbKjCN5fdStJbUZ", + "AVAMqDmPX4qjDZYc71Hdh2ZtjhGrGsT1yv86hAFVNt9u", "AX44APbDNnr1J2wqa9yRQyuaiJ9NWmuEjyK51LiBWZWt", "AxBt2PSuKyxj4muiCyNqByX9uwMG95CFGea8LcacBwPN", "AXwviAZcTWRS2Et6BuFd2dEVhXFWpXi2et7bBN9CABcD", + "AydLFeGXYfMTqFDtcDVrVBRijeoHa1odeqHm7is7HuuM", + "AYEHTBfsPvdGxkCnrMHEu1nTziUJB8Qnhjktph5aQvrw", + "AyvS9yc8cuHM43EekkAd3kx25iGZvq9axPhHPvzre2Ym", + "AYxCoguM1XJXcd5e1bYVQB3Tdtu3vnT5iubjxgwvzNK6", "AyXMWbdxpvDoeJmueCBA3B4w9VURpiQu6pbjrwM2z3kR", "AZFkNiUSszcpsTSAmCWFTcLPe7iQf6sGp4ceV72JiCdt", "AZq4Y8BQYmqfav53bWKhqNRV3X34UJuybPXADVi67AGg", + "AZY3mmLS39SKps93TrsZoC9nT1nUrpYLUmQWzbtgyF4t", + "B1Yd287CKFxZnZXMvNjbM9V61kjW1agupihzR2xoMWBt", "B21L2hCrdE4SDhRi2fHKohfSUNAhoLeaWfBp1C9HdF6Y", + "B2UcYy4WiS1fSYKbMPeAKZoCEzgfQKKt5QBAA8NXLvpZ", "B4xFUYq2TDmz6PsiCj29vFyvmYvqTRAtnhQ3uSmxQVFd", + "B4ytRaLo8YysM3KHvqPsX6NktfoeU79jaQ3bsZX7CGSc", "B52Da5MCyTcyVJEsR9RUnbf715YuBAJMxCEEPzyZXgvY", "B53tbis1864ZZqg9BBdXFAkDponmVvKHwHySAY2tC8gy", "B5FNFrfrrfpBggFBp9h6Js93mrz1Tham1z1GgP3bDgNc", @@ -430,90 +767,179 @@ solana_sdk::pubkeys!( "B6PJC25oDXqzS1hhnx5RLTp6SAdQvctrHF4Juyi4pCWE", "B6ZramCQhQWcq4Vxo3H5y1MYvo37wRZiAwKk88qaYNiF", "B75YnUyemn7ixtnUtq4cDUVKrFwQmn8J2Er85ypcEJ1c", + "B7QNbMjAsaZDvNLVaBXo6Z4Wg7tKcESqPY9tQrSefvBy", "B8T2dbvYaM4gJfpLLbuBScuWKFhnG6KspRK72v5D5uoK", "B8wQDRb5JLuXjJhAtmY1MAQtLjWQySberTN7wLUHmP2B", "B9FoWJeAVrDGZbSzWYxsbwGJKrwqMu6vqJHr6JmRdCx3", "B9gJJ4vMLJvnb5geZjU9PqhkyHX4jESMYajfcALQgRry", + "B9jApJmpNnEpyweF1AApDumzJV1tYxqwqREUt3pJPnJm", + "BAFFC9FVYiK5sezrZc2Az9YZyTaeawXvKLG86FMbakxb", "Bbe9EKucmRtJr2J4dd5Eb5ybQmY7Fm7jYxKXxmmkLFsu", 
"BBvV71ncxGrMDjmKTkcvcjipzu3bv6ExyVPPuRxAPtrC", "Bdd4XhquueXBB7aZXVYUn1XBdJ18G7Wx3LUe6aKkmXEV", + "BdVXDJ15krsj829E7Gwou8McVYmVKTsPX3EcoBWPoHmM", "BebUNmLyM62d4BgE8N88YsJPygWrCWSNaCeq5s2U8uzC", + "BEFD2nMciBXpk43V8LPQ5D8NAedUzTswMD7JrXjQpQBP", + "BEq8K2LHtQdNGGURvpUxbbcutFHWQ1YATAvujMz5ju51", + "BerdkMBXBVjUoKUkAuRn4DbadZpxFCB5mSBDadr8GErq", + "BeTwHdCoNhXWBeKnGxJpt7VEbkuzJnwGdDG3ctpBgmPb", "BF1f26A9FdL6uWSajjTfstnLdCpynXGVrAEzqUyKXJKd", + "BfFiJgrPfecVSMTn1ma9UWMbqcFMftrxzgVp63TFWvV9", + "BfjmopwTknigm38Rj2synkw7mNTjgmm6hsCCb1hQetAK", + "Bg5j1Qfa2SEmio9U3UQKMtERNEjpqvushEmJMAi3SG4W", + "bG6V4uMwXbDvvGessojvShNU2YwUoWy191W6Yr4Jq8t", + "Bgp1oskwwj8mQe94U1Qn59BLEbqRDk1qRgD54bz2fTcr", + "BgrS49ViePLQ84qwwSc5FqC9rpfo78jUUZU4Qbmsva8J", + "Bha4mjHBAS1yFjvjPWWY7ht3jMneu4Lezq219pdU9dFu", "BhbARoxdh2MT3vb4awXraZFPzSwBdmF9pGgURKNsjBqC", "BHR2K2tpc1fowNyUf4PfAumc2tfaT2SpvQVqmmpuN6tF", + "bj53eWLx461E3m27qmHGtJE4NZxefhvZUoewioSavqH", + "BJDPVva3kGqpwRnsPFNw7pJgwdaVwqLTFLgbeuBWiiMa", + "BkPf8F1gAeZ4kPmTBSNgg1r3vnYi2NsU1Vfv3swkyBMB", + "BKQq7feS56yp1PvAcBQjb1zV2XYtASm8EGTLy7eGq3bN", "BkTtw74AC3rDKUbFboQaRVnhLEhsUrchotzSvuweaUCH", + "BkZue4zsibrhA39YBGs1HLgvDiv28N3nN6yKLJU97Sj5", "BLfpk2WoF8RnerCqjxHe7qFJjpEE427riMkf2fS6vUB6", + "BLWxzv9mGX3DLam8z59A55qDpF9KyMEt8krFf88Sacm8", "BLZtwHMTMgnZJdhJQxQaksgJgteXFFDBrA13ywWagji4", + "BmCFZq2tQ3zj3qY1pjK8iegUp2TAHj6cYPM2vJkSA84d", + "BmytjrTNhPLdJg1TUmtt6JtapA66vWgEm2CQFsG6Q7M5", + "BmztoE61fB71cg7uV33DWBDvEP5JLNWJbJxvPDfJ3eNN", "BMZY98zbjg2ey4XNfhBQXhEuvVqzaJ1T3AKD2quL3wnK", + "BN1dPUuuyfujPUcNaRJPquHTuqnxGfY2oeJUV28wTWMR", "Bo9T1z62GVKmnttMz4HxPPtRXs2BUkAd7T7yUsKyG4iA", + "Bodj2iMEYUdwu1rt5EugntmwdqBsuR7j4XxBJR7TVffE", + "BPL97jdKyszDH2xxWzgS3QVVwuYqHaWvhsPxNJPg6Nn3", "Bpq5BM15n4ps9zftpAiJARqVmAfUsmjSKfMQN7yEZARe", + "BpsPu22aaozdVrZu8kcqs7rrURKPH3fBAPnMF3oLG2Ea", "BQ7mx4ScgjetA378LnL1Nm3xiM9bbLuEsX7UKxPseRCU", + "BqGCBrgYpLv62ebUp7DKfnjvSJ2qBc783kehzKJDERbv", + "BQmxWxDnbLEQ3Pr9upNnaeiMV88K77JiXVUNoSHtYjPB", + 
"Brjpw4BpNaAw8jvaXmVEBpGYzKfAQi4HdtshD1a1KDLt", + "BrmBMYWThXPvWmRKt89FsZScadEGt7fy1FRV2QUArpwL", "BSF2yD9mqzaixDaLEraF1en82EWaXx7wbaCqSuKppqG5", + "BT5bANJXEmnacdBHiVWCMGWckJEUBU7VRiVMiLmA65JN", + "bT7ZZEREYCpTB1LxvGyBLyjviviZNVfejSfa4R68GmE", + "BtHbnzt6SDwyKpzQFUS4ReNEGrL7YYwhuRoMdV1zM1gZ", + "BtNTPnJo2YWQhiaSQtNnCeTz7XDWUARRLSshe1smVmx5", "BTPUdVrsgfKFPyBmy5ozHvzMk1QCK9vii79wgxtGhjgn", + "BTVmKrqHyQU5vSqExPNYeozop6JPMCgCCZkVfCMKEa6w", + "BuFKun14y2uagDQXKou6x4ArWuG46US7X8TEEJVvSMTq", "BunxTHgkSEyHHMikCe9ofDB5dsgcXKN6nqKC5WQsd1op", "BV8sS1jn1AvGAptY5TxNZdcm7aa49MZCXSpXQjzjdnYG", + "BVEX3B7fRUbadEcigqknwc3cM2CUXpyz9vtTccBpwt7r", + "BvJSLTtfFz2Qt1MLDJqfEtMw8uGtiB99nthg2aWGXBPP", "BVLVnUm2tkgX1sK2f5oVtTMR1opGra72s4qBD9LjMd9Q", + "BVSbFiadwxwi7RajrGgx4KAuwY7f3s6sB5AaL9pGZ4Hh", "BvVKJxCQNsFWpB5o1B6To4uZzqAUgNXLcfbS2inyL5XU", "BWBZDvXHUsm5WUhDBzA65TqMF2CNSCtow2M7tfAtJA2C", "BWiftESMUsve87rkjU7HsaA7fkiJRAbv3xZLQrmKZtnz", + "BxFCg1xrZukbRHhthKXRY1gZVnHvu1xntdg1YmYnqeE7", + "BXjzAqxtjEnUCczhNzoK4hYuiH8o7oB7keMPaSJXKpD8", + "BxLUkNxbwARzCsroVyFSniLBx9FirpQJviXUiB1ZpBXQ", + "BXWqrL3ZuDU3fJ2qbBGmmMagLBLgWv65YbXxqEat3Ud6", + "By8BBocWV2yLsEMfkfHk7JkCmg3wjh8hPyKF4kd5nTrZ", "BYxEvmSwA1o3vJGhooJoReSjrzou8A9h1TZX9S4xomHP", + "BZCeZyvroBrSq2SbwjQKU41kcxkutUti1A7rzZqCfaHK", + "BZehsFmSj4iUhwxHNqJVzduQQiaLqhzfE5mNT92UZf55", "BzwAtaU5sqkd2wALkiR7A7TimJzN2Df6vr81tMWmiRky", "C1QUyFjgVeG2mNBHErtmCLz8BUqS38saMUz8KA8r7921", "C3hD8Q7dLoodUm6E6LTWR3XqJgcRrvrVaMscwMBV6vaV", "C4eTa4tqvvzpTsp9pa5NKAbeDXJs6sHWS5BfxGB44Xex", + "C4N1bMSzbfDwHGMitxyufNZPaAkNYx8vJxHRnHWrptAT", "C5HvMeXdHGxi7nVTFPF6KcyK77RSWLLvEEB3ParXoK1F", "C8VJytJbZM7KFMXHNUdoF4V7V2QbhkxNs1qYybRoqUEK", "C8x8gRPxVQd1rk9VG7fm5MqtbPkTF7C9R7NUvb8HJ6xo", + "C9UahsjNtQao74K3zYdJdkGrhfcn7Rf1szmMtUP6fSRf", "Ca8DQQagVHeUAhWPWxGCmaMuccr6aGsm9HxeedxUKBC7", + "CaCaErMi1TtrTLLv9jaM9VVG95Tva2keMe8BZSuji3D8", + "CahiUdyge1w7Z4GrsXNpVYamxj7pJoY7uSThHC1LCBPE", + "CamZtw5WThJCqyBYuoBVN2yLBg2CjCin6SD9DBmcQWDv", 
"CaT9dSx37Quj1kcAXEVd6ncM6NLvYUSqhtgEnn1JtNKC", "CbhvvtosVdVwZ8GVrBqgYT3JrXLh8JRqgKpimhnZw31h", + "CCM98AN1SENGvAF6mNvYsCvQ8SPbommtAdxSLdYwdt39", "CcorN3BoG1XMZehZ9Xib9YLo4mcvo7pzeVurC28gYYqX", "Ccq6zHdtv3DWCP4AccTi4Ya2xPGsEVHSfoPmQ1qffb8H", + "CcZTCsb1HhptTfLCv8PXLApzVzhfvXi1MYUnmyiWMsY7", "CdgoKJdFPyqLXNmTFjiXSyrUefmUnjhdQr2kpTvDBfe9", + "cDK4eZakyrZPT4fdhpPUt8q4EekNEwwGEz84LFnQb2S", + "CEMuwgTq1TXoTvdFjuMYfTu8Rnvo8HVUbKGquAsiLCXs", "CfhJ45Zx4Jod7LydzyrDyrywhmreZMWMZaT4p8YtdaWk", "CFtGf5wQ7jPgJVSk4GiVxvqVZXfkpxzdnkFJGduUKA88", + "CG7zvuaN3PTuQN9tFNoE5jERxtYwg8YVuKE5CMYD2jp1", "CGgiEmA5whBdjKKyJVgFEBe2Z2qDVQQd2rMvAaUJP6Yt", + "CgieNgKQw629VPiovu6iHbtSQiCUt7jKBWnK586LHqTa", "ChLMXZ4KXsMpa8W1VymMxim1vdPSK5a1jDwfMbm7cycT", "ChorusM5BVgnAKbg9PF15285LkqeCoZWK2p9s35T7J2A", + "CHpj822jTX22VcSqzksxkJLB8kBf5gDMCqYbgXv36dvN", "CiY5RjWPs1XyegKyBLcG7Ue7YMf98eiEnmvqnSuSKbob", "CjhKCjNC1WUgBjAGst3D2XmSsWHvt3zGasasmogPTY6J", "Ck3SxoXUShtXfLKfUUXAtCwrFVsEohESJfWGWuSgtTQU", + "CK9yiW9cCVkJGs9qB2SZnXUJ9Q5btmrGWp6KuomttDXo", + "CKm2wXrhngjLRULcxx78k89VnpGvpAiRJ3DJaF9tZQTz", "CKs5FjmJ8qx2o5gzCJukb9Q6Z4TEJ7ogJjuA1Fch4bwA", "CKYDvsLjwp6tPXfYxLmBv9LtX3FPpaAPj8kw1Gqy7s8Y", "CM1c6z3pRNgHFcfZG4z3wE31jaR8c4gCYQBJVEoCUyq8", + "CMFZtuwCGnXbnARnNx9JrrAXhGHjbGEis9ajFwYPGqCs", + "Cmg9ZbuT5pR8o3CBLo4iwHCMxWzd21ZyNAoLH1sAwHxh", + "Cn23bN3TiQGAFiByaJxp8aE7suXJaarQsB321wJqEAJM", + "Cn5H2oxjXemT13eeFU45gobRYiJrjCrhGaqKTMd66SZM", + "CNVw7suEhz3LJFDzN1sjin1MScbjRPWB9yZ3tNT5QrB6", + "Cnw2PuZHpJjpLd1ZxxPxetuLiXHniZjjYMGhQpJYqRBU", "Co7UqfqzXzTjhBwvam3zhNi4p8dKtdSrfh6rQykoNMy7", + "CoCKdrHVE1bjMZDwP8Z16vd7U3E5tGyt1hLw4tTsysmU", + "CPi7yFjqm8MiLFJpdyfWowAgQex4DjJxxHcLa2rYZ2XZ", + "Cpp6qidLhCztkpTroSD8jwF3ZToYYmvLX2PdTs91bgqN", "CPPVEbGFbX3XAThetvfveCE1vYLWUwwJGT7DxkPAWb8D", "CRCp8aHuYiUVfWMn8dG7z1T7SMi448ruuP9n1e6NEMmt", "CS1Q8yNkw6a8SmY4nJ1jKrqhaDo18Wr4CnNbwsvKoswC", "CsaAGRau3ZvyMQvJ9CWSqbqeVv9zw2Am8FhnL9sr6jTk", + "CsiPaAm9Zr9EpGojSNjQ92h1kcKQvYubiKmpWb3C8B5w", 
"Csrv9JCbebTKu1uBWqkfwuPHwVCXsYDrQmeXf19onbsY", "CtxU5HwVbgspJVtWxwjuP8wXUMdkjYJ4EJwJ3jvZh4zu", "CUdHsUm5eaCZRctJo4HayvvYeh3AE3wCrNYXvMtVniBc", + "Cui1rpu4rp2f53vzjzGGo55DHvgf5HQVGqNbyRwjWMXc", "CV3F19YAhoW7DpfHQ5W9t2Zomb9h21NRi8k6hCA36Sk6", "CVtdVkbhutoU29LZKuDM12EUZmmdVZABmPk2CYnjq7yn", + "CvwM8CJEGbZhC3dVnVtmBAmzirSjAvQJ78yEvsqWJkPh", + "CWL6skWfKLDd6SY7NnkjfMgNR1QxHhxCadyFNL1ssNaS", "CWm26qFBekyTtUYg6ZLfJi2ePMLCdXjGkZkhFkXqJfrn", + "CXvTFeiDYsmHfUXyYgm5byEwM4T9T1Pcu5qRiTfKdsho", + "CYx1c425b2sZoEmFxLn92cb3QFvTY5umgmuwBiiFZDZr", "CZWpCTN4rCWer8fm5ZqFdx82CDiCJjZLKZ5Ti2gdmchQ", "CZY1ZJAUyD2ZfHE5ENChUmhqSVFwPnTm6Aq6N5tbBqaP", + "D17iHRzwBk5NFzAEiUb5JqhaqDUM269utRqduzMxcTT7", + "D1oFvnJJFoZAxUPDMUw7dsBgy5c7A1at8PEGiT6XEthk", + "D22jrsNWZG6qzTURrT5PM6QvUBt4vHLYE7dx4Frt5wNu", "D23NCAVxinE53BTemguZCheAqCdMGfNTUzWdoWvq4Xj", "D2NjDkcv8Y1dWGdtWAKPT4em2D3sYzM8AzMTpCG1RVf7", "D2PNC1USZ1XZ7mZPgdLjAvbDnaWBx59WKKM3CxjAqyu9", "D4kv7YbigKSHMbCpzLGyP8SywUAPw7Kvn5VdHkx2gNom", + "D4pLf3e7kDGC4yc156Mb9A7JSAYnFH63jjsvAkfguqZB", "D4xPTEsWkU3zN5bDyTX9zgqnAhMoMccnCRGRLwSfo1We", "D52Q6Ap8RVMw1EvJYTdEABP6M5SPg98aToMcqw7KVLD9", "D5JqF3qkLkeJKKEi145oMseEGc1ym9cWKtBKtg4ZBBnN", "D6beCFAZeFtXoZKio6JZV1GUmJ99Nz4XhtxMePFvuJWN", + "D6q1Ju9mSkJLokBE3q4ujrCf3TdLYLLRC95Tatf7TViC", "D6svmbCCUDFYmw8burYWAJwBq3e3Cdp9wiLdfNZ4SLus", "D71JRzjPpHipt8NAWnWb3yZoXezbkGXqSf7TVCir6wvT", + "D8goKEZAXaWCfVSFbGKZQtFn3B5XFdLLmJgUSgMjEJf7", + "D8P3w7GQ4zTYbJfEGgfdQWQ1vrL6umGYAUrMz4hBJjrN", "D9rCbP5rBrJztzv2EaACNt2LhXVLpPmsNgcWyB6LdfWW", "D9YkGDRwdQaPXZe6V6WwWUeTWwfSoNmXADf1GXePetpC", + "DA7SNDUGAHwcXxHoUhbPqTv2p8GnncMpRYYoT6eJKmSR", "DaB3ZwVtGLzSjazk5STQEu3MkJR2nkK3tDdCPAvx9QpM", "DadnDZbFH5BHHRHD7TaobaSQ7QATXgvWegHUcZ7ZGzmW", + "DahDt2bS4EWgf546qQm8PLiRZuZPGeUKD42urQJCBYJS", "Db2V7nPHc4sPHne87nYXPGn8Kv8rMsiWCAjgAXmpqcpC", "Db5FG9D5Z1WWDSvQioKkymwRiTTGcTHbryBniRqYE65G", "DbzdjE8TFSN1Zb4g3N9NsgFrzJ68G5WKtgSxqVox7Nxr", "DciwdVV1DXimdsgRGQuQ45zYVjZNaof6a6EZ1JjaCsvx", "DctYdX8c3qBZ7RUtYE4Ffunjv5SYFxVde4H4BDejPzMG", + 
"dCVbRnKNv3q9aBDMyvLRBYuWWt6tpEWjpucnZuJ2rAd", "DD89H8QdPyWGtR5QnrfM734G4qrD775HFGMobyrkHjn9", + "DEqpqWRASZVoDVMQc6NpNjJbiY5KxupgiUXWCsc4TUim", + "DeXED9nidMNJJa3ie4ZxC6XAw28aNhKmv25FDJjuqDX7", "DEZAHY54DgLq9Md8CyxBgNCe5hxDQi7fJaSE8jymtazr", "DF57amFm9aHKYL9pKLSWindiF8o2RRxtReLb6d8DQc38", "DFb6qaAkd5DTnFVYLDjzJNfsUPygP8GHHebN1CBv25cf", @@ -526,205 +952,381 @@ solana_sdk::pubkeys!( "DHa2QSwSdf4uVtFUyGeTPJ7XZcKrStoqQhHDa6dugP6R", "DhMuXF3UqZvi3GhdrAMVyEQ7pW4prM8DkW54scYXo9Ke", "DHNSHtEZHwPkkfomi5oMmCQy52ACoDpD5Kc6oNGTJTth", + "DhukytoqRv1H9J7LZiv8FyqTYhBhqb53gMgJT3dtdyk7", + "DiM3w5M4ATTQQheYRrizFCSoKCKmefPGnah8cPsrYt17", "DJo6wDUWAdAFvuRvy938ze4VkwCBrW2o28pbcZ1qZogo", "DJRbum8r82ts9uuGg9W7AheFwRE7atSV1touZPn2bEcH", "DJvMQcb3ZtXC49LsaMvAo4x1rzCxjNfBfZtvkUeR4mAx", "DKNy6YAPt6zq5jVD5S8EFSXpQmqA4NjrQf8t5v3tHo7h", "DKnZytVA5wKbNPYW1pvPpoE5YeSsxu12KJFa95gBAGm7", "DKyon4vSD7mF6uqgEJujABpEdhRbyX9X9EzFjmEz4VBx", + "DLDshHnnGetLXCyk9o3RpKC4iRATqgw3UyftYF55ffuq", "Dm8Kusyhxmz2NmwF8RivLKembinSL6h7tvh4vrMVNxoR", + "DnPv5bC7CZRUTrK5R2rJ3LyEDLcTGa194MxEoEBSutV6", + "DNtbLVUiYdwbKvcYvr9Wy19KLUvxSkhbZPAbqAmh4oDJ", + "DNw5GVXaC2ZD7zpBo4b85s3Y5KVwkmqDE61Sjbjutjmq", + "DPqppsgQs5wwx13ncWwmRkS5HotTJhgFHXtDj6qSAQbY", + "DPRGrY7hQxbG84ju4zC1MBgYivbydnjk3zn9JDEo9Pce", "Dq5r3zG6XGBcXNDRSWPSc7DiWwjqcGoiVcEhZ9mXEAaV", + "DQAi5Bdgz5aPhMMUMo3Nun8TBewGf4zJo2EqGt1jNNQ", + "DqbeCr74dFGDPvg8BpV6V2Jy2BCTZWmuvChkZVvPC3wP", "DqPvDQAjyZ557yaHJJzEekV11WKN5EZ6HQujNXQL3FJL", "DQSg1PLT3Px71U4LsfBNhg5yT9GgH8FnK7qQAq1aLmk8", + "DQTiiFVnwD7bSSgkMmwUsBsgnNBza8N6oEGeMC8YaieW", "Drf2oN83THfrUJHA9AGzJZaL9KMKggPoL9HJVttkSCgL", "Drkj3wbHHmE2iCnqXHKFTmwPkuSc4bsFdgAmqv6eXuWi", "Drskw4YqMzYRVikgsGPACY1GnE4zTWs4uqSKajKdkU4Z", "DsaF77cCADh79q7HPfz5TrWPfEmD5Gw1c15zSm4eaFyt", "DsnqNtwKA817a2VQypWEzaRXY2soq5Jgc68MgFBMR35p", "DTDiBe1ZLTJGzazmd3k6oHxKGTitNT6swT68MguT8QaS", + "DTPPJWkD94MGE2oA3cyszmCAS3QvxNjXznnRGjpanbRS", + "DtqjPaZAuxaNRsX1u9e5EHFF2JjeTFwF5SZ9YFJ6PyTj", "DV78gathrorcpWsWrUkWrWNowLXpizKsPBupStzeAJnL", 
"DVnKs7XAL9au7cWrTT335gZ3agJVwrqeSVnSWANo1SJG", "DwCuwTRTXQWm5M2b55h5oyx1zWkNvTUVGwmUGvBAVbqc", + "DWic5rF2jQeAQr771cCgzyLbEMgXaq1AUyhnpmgDze1a", "Dx4bMuKpGaxAnd53QYDyKhD45PjuFLx16mrgoRK36STf", + "Dx4qoPTLSRdbd4h5cy2TD5rDyV1d7LxQYdSY1TLStjcp", "DXCzguRGhTGvFm9hdVsDkFi4S3n6W2yrNeUjrFN8tkvL", "DxLtXrLUrqja3EFjkR4PXNYCuyVtaQnozonCdf3iZk8X", + "DZPn1XMuoBpNQzcXozfMMCUJ8YxgKyJP1Su5oKvvJk6h", + "DzTb7aPtvxo5tbbKxEjiSuBe74tdgswdj3BY8LwstoBg", "DzxNmWD99qvkPdDR94ojXhUrpzT8VdqB5ktYX7fZr4gc", + "E25BqJUzSjyzeZQR8LYUcUNNVrgRLHhXxhPtZGB7KCCp", "E2cy4hqcUpdyMpx3TuHKpdW2cJZ3cTSthk4jfqJryt6B", "E43Lu6um98dGLscCuPUobgKC2oLANeByzqdab5KjxV1W", "E4YYWrsKv9YkBjLRtVNYn792RavzkXL6NPJ5Z4sHXiG5", + "E5yehypUe6uYm88hQWVTX1AcAZJiPgffV74GJNYHPxGt", "E6MuSSCF5aoBstVcZaD6sk7hkQrxvh7s5ttVt8NzAiNM", + "E9bcuniYQhMscfMjE8zaAXQ47TH56gsQoKuzvqXHxnAY", "E9qZxXtwWT5FuwsXHLjA4cjJyyeYb3ixHxBSrJJDzPwx", + "EaVhV1UzbiAh8BCdTiAbvoGWktfK7fdR4PEXkkN1qG2n", "EBDnuJT5USg5HsQSZtWT1q8y5XjgW21b1ebYSahcX9V5", + "EBxhSfAWW2Cfouvj1k242W6U8krZVAxJS47SG8UKb4ch", "EC1TjttBQaKU1dXuMbv4ZMSFXuPDt7UCMvNXruxCXdA8", + "EC8cRNmwgbhbs5LtvufLkme1QqedbugySgkofYtPoDKd", "ED3Y3cuH3wtHXT8TmbDE5toHU6kwQzevWtvkP6rGFNgc", + "ED4pSxzaemm1KZ2kgSnimKp3nw8GB4qpUSmFCxhqUKRw", + "EDD6CuVDzPU7g9PCUZcM7Bf4rQqeNrt9ugCBh1iVRMaN", + "Edmn4FjZMGSmCjCE2FBLzHNjukEXbzEKiHptMfj87uU9", "EdSaRxfyh1Wjeq331Cr7aSpdentiPyCPXVHvSacovueU", + "EdujCuWbg7QKyty42igLbYvxGfWNWB5VsKZyi1wHbHTc", + "EEBa8frib8zBLxj61NEMAUoEyrHFgV9MUzneHVHFax42", + "EeHdr93aBEXELpbDx9p8ScgzstUZuNZmFFHM7oPccXzS", + "Eem6rzePhp56kYBvWNgU89PNjrnWqJs22WcuiSjmkBc5", "EEpAEPJ1MCfZJes2sdCtkFwdMWvRPmDNnynnG2JkhGso", "Ef5gVy3PFRJA7uQ4UkAD6AufNcZNtHN45k5N3L5mYatU", "EG8D25QxDJ6nbD3oBpu4tPDvihriy9mFiPq3CxCGFiPF", "EGknxV4LZM4DNL1Y68iAPQEdLsMZbL82wQbDmsGw6w8", + "EGR5mW7QjJriWjPzDswcC5njXPx2zzTruTRZKS9X2Zbv", + "EhJnXqSV4wjCEA1bH8LeZQZmJnMQXJEMj6Qnya3u68gn", + "EhxLJWG4Mx4QB8MVfoNLApiD43yKRES1gdD3zQzsFiLp", "EhZmFRvBcYQU5TscYdQV2i66pmmeKrzecbHYbUT9fuyk", 
"EjcB41hrq5Ltr9Yvda3jQ8zGkkfFGKadkykTCQnPeCne", + "EJqVBubhCVSMFCXrLWPUL5HcA5dkfVMcJkLyPRzAoUcn", + "EKBBsq7snZXyabwMa7jbyyRTMhaUQqtDHtpVgDnSg19c", + "EkVaQMGB3cbyKdqBwBagGtURjtoXsP6pS7HGyizwhUs2", "EL3RZmhvLAMMoDip59M3oKgqXXzHAPdZ88KQ2h82mCB8", + "ELERzoazbkXxQdP1qPGxnVVGBJy2W7Do2MMRt35N2H7J", "ELsehFqpFHn2hJoJgEuSCpYhTvjCxFz9ToiiCXWUE2Tx", "ELWMKHPVZpFTwBSzVPF2q4nmvexLxWycjy8fuoC6egBE", "EMeaA1d3kmoBNtZQNgqEZS9y44gMrA7iSuqS4nZ4qxpB", + "Emqe9upNXhojTRVT24mMxLpNB3Fnaoa1FibRtVELunUL", "EngVeJ8w7soeVvKwypuSutnXFPSWDLMq3Vw5wuAdSGjf", + "EnLMAih8NTU7ENb5tiTHqgP8MtTUnY4QDLUKrsRwhjtk", + "EnrrqXLwEwgDU9yfa7YVCxmm6uj9vjhCnNUk3qjpFgws", "Eo1uPQgv6jHk5etCBX3hJ7Y1iYti7WqhisrtY3krEHCN", + "Eo8rLxC2apCYS1qQCzQA3CvSTFqbGxXEBe51i1i5sWkH", + "Eogtuw7vSstp33UQZ9mXQ4X8s1PSmUfJ9fQAxj6JZgWE", "EoHfz2ybgn3GWN3TbTj2FB3AYAXCzhp4cWxMMnyAo4pi", + "EPdTTdBRGeiXp9yzpPK1RUQeinoCfgq9qdicBJqZcJgg", + "Eq1A6L2ZUpy1KPVni4TPYvedSTiFSNNTiJdECskb2Qrz", "EqaMZqSjRtm1c2FKLmZoSp7bgzEW9WBYVfT19n74cYo7", + "EQFsB8CDcLsCYeRJxhZ4fJWnXjCnbxrbhyqjyUJvkDcL", "ErbvzZx2Ss9GxizKyDviybhZPu8noHv4AM5vuzTh1ij6", + "ERWJKBdJPoXa3kq1gFJninu65fYDTAzNSPsDxCXjNX6Q", + "ErxAGCPBB5wMWU7mgZRXUoNyYSnMmVR9689hd6CMTfsS", + "EsLKC2zS5Su5sHvkABXe6u9RuCv7q86jGiwihSz9uSav", + "EsXshV7Yva6ZiY5P3u41vjWYNHTNMaS19qcoGAdiPZK4", "EtemtT8ofMe1puaYitDPxyz7KHXPjsxNwsgBqYYBCr8F", "ETMbiU7hEt7jkoA8H6ACsfeR7LyGA773k9HA13yJUfex", + "EtmHTosfXS5zbDbTd6RxWvGLWwmT6fbjR8YENZ6byfQ9", + "EtpFdJnQ25ZJMheLyURzyCD5ch1SL9smfMcEeKfAkEHq", + "EUoXy9YP2tAefgW5CHEvMGAu17McAvrXiQ5ucezjNYcd", + "EUwiTG1ii59qWfgsJwEMjqh34NmShMdP131BWWqJVPaG", + "EV7arpFrG8SFFAkfYRMEJtuqocMeXpNFD7zDt22qG23T", "EVCxvddWCFgh9LF4EJKzQb7stsPpSBrvTFUYnE51pX6t", "EVd8FFVB54svYdZdG6hH4F4hTbqre5mpQ7XyF5rKUmes", "EvenXreut1ywzoMaY3iXtwbuV4xcY2yz763TSxngja6n", + "EVQxcfApDm4snuJU1XHLcDmwqiLsAwRQ6MatFKmSTWv8", + "EvVrzsxoj118sxxSTrcnc9u3fRdQfCc7d4gRzzX6TSqj", + "EvyvbrFLX1M5yVRGFNHt6o3v3dBLgrz5NoE18rcwyyXh", "EW8YLbq9b6saEKn6K9R1K29CSST1XDTPpJLeXThpkXes", 
"EWg9NTC5s7Pa9FktUk6dX8xRYkvJ952peH1z1iznd4nV", + "EWQoNfVscjLbdHFAzCrbCt3FcQGuGFe7XWogU3KQ61Q4", "EXaDpiWsCGQY9d6AUa54XBAs7fvxF1z7V63xN9ikMMvP", + "EXEFh9rPB1VN5NGDQsJiAgR5qaxUzHQDRJqAZdqEYGV6", + "EYbvBPU9mSPTVJrZgioTt8PGPL9Bjv5342ENBMR5X8r8", "EyNimNczDewG7Ubov7T2RGKD2R7i54vbeDUnivnR6ePA", "F12Ah86ymdNPuXya5i3PKG7jeLfSMGpoRTriVTgcXr15", + "F16J8jYx76jpt2vgTT5SPv8hJGGcrShzCHG9LBV5vQD3", + "F1TuusSghAobmbGAgNrdxRS9nBjwT6J1yyALUvpEA1is", + "F3bXikq6WnjMQjKcvj7U2tasv9Q21xTWVUTp48GmhVas", "F3gsehGvHNXtF8mDbGVfB27Lq1paSgTiqe5nzvbFVREK", "F4R2g7TnRmr88GY9DjhFo5Ssk9Ji3phBRssrZL5rQxWs", + "F5f3vcPpfwgouhVVzSW41XZRzch16jr2qx7pYNYyVruW", + "F67LCN4eLwqBGbHyT14nJw61x4d4CMtzdcvFq3tZYwki", "F6ZzyJaFh2XNdDiZSnBHhQpqWH1YMifBxZNJ8JXeFwXB", "F7FgS6rrWckgC5X4cP5WtRRp3U1u12nnuTRXbWYaKn1u", + "F7zQemwQJo3bKVAvpcCAfkgXD18kZxYvgMxCP3X3qiK7", + "F8ZZW4WKUx273i4L2KubqUCVSKLmSgkpxwHRL7gar1F6", "FA7DtFHm4gh7gwtwdRphSq7wFdWBU4inVnrchCmTKLTG", "Fb5cEcYNgPXKJoEmvPvsU2ENYRVePQtExqgf77AnVX54", "FbWq9mwUQRNVCAUdcECF5yhdwABmcnsZ6a6zpixeKuQE", "Fc6NB99bkJQn7JsVSqdJs5fJEzj7KFpe4JHNQCGVCctj", "FcTYrxp31zVjTW4qjFKkgRcKXbWcBbiRQqJYpufwcJZN", + "FcWgrc99RAix3y9th526GnzN23MQSkFmyWaeo9xJ6Jfo", + "FDAR4Ms1QU9vparYAwi1jwYuvy8Zn5J79XyQvXWbHLQL", "FdC2FZ4geXeEh4pTPXWrNH4nqsriB3xD3GME9HPD8CVR", + "FdQgwQ38ETKv7x1mWNoAdrLR2YZyp16xFDC9YR8Gseva", + "FEKzY1TLRYWDc7AHTkREpoSHvx9EyNpPmxp9FeojPbJq", "FEq9FL3hzRDMtL8DinPAaeJpb28GBZYTpTeRcRyHSrGA", + "FFhtic9yPS8ao7Qg1GKjqyzwhGYK5tsksT9VrLioTgbY", "FFRanhUkAFoYNPE527F7BKeaHe44Pwi2SaiaCoppkJCz", + "FfREhUvTUisAFG9mHeetnDA9zYEGady5BJqj5jyCB5UY", + "FFrx4NAJginBWNm155TXLgx1annkmdqEwAP79nNRwxjQ", "FfwtopRBJWKEJiCmkNUFyaQ2FMubtzMhAzKgHDF7XrLa", "FHyqLxzvim55sLcM6SY4q99jqe5ifNi8oaj71pAef5gw", "FiqLajUHAPrdmSZzcuy8gKNU1AweQpCXy8sznWgGEZ4S", "fish2tcDFRiAM1cNKqF7b96WzQmmDRjsf5XD214MhpV", "FivGzpupCvU4yr9E3J8RvWtLNWTm6ZRcGS87a51BVHWS", + "Fj1XTsZGwbr6H72xaj4fPg8M4QyXUdnKJJbpZXunFpWr", + "Fjc7dkd5ir2heioaU8eomUgbX2JY49BCqBX3doB8o3H3", 
"FJKaQebiX7qd4fy4dhkXx5Re88B9DVxLT1pQRiMuUTrG", "Fk1pCDCzbBULB6Kw4cQUdTjvvbLBooW2TPSZjvK6YKA4", + "FK1TQPnYVzg1e925kHurusgkXxxFuBfEko6D8ZirKNeR", + "FkeDv7mGz5na2cycc3fwCtxmoWvZ8WvgVNxjYxeFmdGH", + "FkMj2LPSWd9LzLZrpZ2L9YL1CpB5eA5W3J1vyvvpp6Jm", + "FKny5Zv2nrLFKfNH4jatujiiNG2c5mq7MXweKAEBBzse", "FkYhpz7HSGQJvA6apj1BKoUfytQvWseLfSUrE3zjvkQb", + "FKYKRLCFmh7uUASUQjkL91yXCUuJ9wdbPCZEnv8HkKnf", + "FL2Fo4xmVeo6dGgsaBgJdG6sB3Gk8EmEpMBe67CKopRH", "FLcUYvDMd5nh4cyP3oErMHoKnKREmza5rdAZ6XHYU1bd", + "FLdAnmYGeGmwJY3qECfcZ8pyQ1LoTAeBPm8YKFDxQrMN", + "FLHB8AGEsED5jAF5sS1kSkAzSXVK23iuT7YDPHGmbcjb", "FLjMwfY9ahmyc6VgQpokBrra5Ucf5KSK5YW5x9MMKnpS", "FLpMRfbSMkBnFXDdGKdzcGP8JgrNVhaYowmtArNughqt", + "FmaWRHAtnhTX3iDhgTzaFHpcuP8TgjWux68zo3kJxttT", "FMHjnmeRLszDSDTmHrbqUi7rpXLcrynh9K6jQvjdhqf6", + "FMs94yuSx6idmVsanrH6KrLdtz1CyambgH1jxCLorxHR", "FmVd4YWnsiau4JLHYxpRW63uutA2sPKpKYGiwB3hBkwt", + "FnaAWEDBPSNCUk49EM6JFS2hr9kGqXKkEDXkw5fdR887", "FNBpvn9cNMmMA8GRfGxaD5P5zkG8m3YAJybgJkVi9bbK", "FNdoUuKVBigMFGpVvSMLXJB4FC7XQL1RjPUqUiwvPiCS", "FNH1XmR5WgK7CH7W7YdcfxtdgaKFueKtqaggVr3CnY7M", "FoDccJmq4PksAoMpRbygVVocdp4NrC8PSwwDd8nfKYzv", "Fou7Du6KtVb8dVMzKMYW39fuSGpMzJGwpkQ45NbxA3Tx", + "FpcTktAA3ZVCNUfgbvFa777BwdChhX2t8oEoueeJtbgT", "FQa4mYpWL7mNEXe8dWbd2FXxpreFtYJkD2S6hMD1oXHH", + "FqdAcsUQBMibVJVr259uSAnA5FMK2xACaz9vPEtdvkYn", "FQFrdHAhKFP9R5R6JkJPtVJhLDivDia4cNNUcL6Eja6j", "FqheXr2yJSTRGncTqVFFG5sLaTtXZeQbkQAxbL8mcGru", "FQPRF1x5eFUdNWvfVwwJkJdqqfstGnCkEFVKEKHp3GVG", "FR2rptYjvSsYeDur8khPK33q6Byjn4if6Fb4oTiJF3Fh", + "Fr3WfD9xCLX83AaktYvcihvqoaJKh7f1AnD8mCN1egHw", + "fRiGutrC4h4ZdYVE65g3pCeJNDg2g9j21AvMjhDMwW8", "Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN", "FTUh2jo7GmxFqLy8c9R9jTPapfGjwcaDdozBjhKJz7UN", "Fue9LZxjhk2DNXWxM3rPKr3z2qntChdeth615m7zUo8v", "Fv4zJ7RvV8gEYxEtLjnGZAX1qxjqRh56DzBgqvFEVjjM", "FVnG988wW3uF613QVxmQrkwtdzS8taxjFsuARTzZBwMo", + "FWUFG4jKyCVpEhzYHAYuRNjjFpfrt4y9NifzrZPZfwK8", + "Fx7dVi2oVpynBKzU2V7nRDbdfBjWrqqjLFxULXCVp2TB", + 
"FxMwrcSTgb8RKFwJxa6DSbsKQNmiDqstRTUeKej4Zw8D", "Fxv1ymSwB6tdRCbjBURQK6P68XR2njCGfWbnfzVciJsP", + "FXVTpozaNNzwiaEu5HbS9EK7HMNm7QPLk1UEH55hAkK5", + "Fxw5NgncJhGbnjH1wuZuavVPDQcowTwG4wKiWAB4WNAw", "Fycm3maimWfFWgdLJubtEogL1v4aJEkQapRwWRPxvf5m", "FYH7U2HPhgxQCsHBGDaC135Rsx4tZx7P6ZjxnGqWHBHn", "FzAv1TFpCyR65GrxeqBwnEzNVXEeUMPV5rKZGQhPR7mq", "fzCFpDGUcyX78KEMSfJwczi61td55v133eyTeQtBuW1", "G1uCu6JrV683QK3kdAzEiiAEBSMk32Ugy56u685cynJ2", + "G29KDaE6ed3YWzWaNesjgoBu5CFJrcHe9sb8dr7b7LLq", "G47WACh32JUcxyiCna7UYw45tyYSFKQ58yFpUmhmMybm", "G5r4XSC5D4Rw4NaWjbgBKnj6bNDsSGUvE46w9BYAT79r", "G7cSi3avELxMLTCossnsooLj6UNhnfER6kpnSx8NKHfM", "G7gAgJpRHnRvFhrUMA5khWMqHJ3tpWVWdpsBvCq6w6MY", "G7mFk3fX4xQmBV5je4926SzLCphWFoww8APYxQKfkNxn", "G7NoACjYzRcrLibw3dbNoDCVoBR67ijm3tYeL5zL5qKY", + "G9AHwpSz6gRb2PYQTj13oouRmpN1VAdHwXGzb1eoTfCT", + "GaAb7uwik3bGsMurHJNmbabF8G8k2cYJy4Wrv3tefuWc", + "GAPNvBD6MXboQmxP9XTCC4CMsT5gUpdFZWbnj4Tz2s7i", "GaTDMHvngmoJhuRYrLGcE2GMCofu7K8SLGwkCkDE1mYh", "Gb9j79QFprtbY4sieaLZGjQr4a5ifFxGdnU2qkjPbBJQ", + "Gbqh5eq7nVajFNocG1GZGikoiAqPstfQjHxdGBc3SD4M", + "GBtNh6c3Jbf7xMSA4VCAbjkJpBVJkB1QsZLh7iD6Hpq7", "GbtVg3D6bNFSjem21vrJBJTpUniwwEtmvs8mQkX5XS1V", + "Gbwg3HCbD9gzma2o6oTqYkDQnojeZ1z7Ygc95DPFA2me", "GC41PE9hngtmwzCbTuZQvCLN7C6xSLTC1AxRSAZSPRRf", "GcCuD1yJFSeDdbogBFrnKRfPcv84wNjzN2MAz5vrv25S", "GcibmF4zgb6Vr4bpZZYHGDPZNWiLnBDUHdpJZTsTDvwe", "GCPW2jinG8pk2KfALJA2FYNhLKwCR42Y8ccQPXz2PYg", + "GDoZFWJNuiQdP3DMupgBeGr6mQJYCcWuUvcrnr7xhSqj", + "Ge2SFnQj7BeJsVaNqSrMz3XFGBjoFMMZ8qThYZRYYNr3", "GEBvuMyPAM3Hmsr4UnGMqeeJNPiC6ZqPkCGKW6pADd8h", + "GEeL5VToy7H2oEFyLW5q4T1HeCDa6AA5YN8r4PiCnyW4", "GevceSyTLxHv55phyp2PirpdsdqNFZRZSYViRCrXmneh", "GFFuGhyHAr2fjH1DL42m9EWpAWXXdZ7R6PyzuMzDodLy", "Gft346NFxfieeCXCHuwdQ9TN6HyPLr5oyfwGS4DGQWGt", + "GFZ3w5CU4Byjjo6BrQxVsiq9mumADrR58vH2TT2KxmFC", + "GgfiMJLWSKHr9JPR122BiyEXgCDDmFQuzzkKMNbNkykk", "GgmneSMKWnEcavporN1vPpyTun2QRBzCjFCecQT5km8y", "GGQDNAb7hFMu9XnXPMw7NvyS6MA9BfeAmaZX8yp1QXFU", + "GGQFKUi8FWGSAnNWeoZdpwERRgaW4VMiBdiZbEETq5Qz", + 
"GgsyCG7aGFzXA2A2gc2VCH5qpDB6Fz7zS4isJ4rfkrx3", "GhBd6sozvfR9F2YrKTFwEMqTqhfUjxNUYKxye7ZvTr5Q", "GikkfYtVZgaUtcmreVpQ1Eamw7mrnf2jnDFGJBnhVQhG", + "GJbU1XAJAky6sPVH9BGux11AGyFtkqZwtiRVS1itqcao", "GJRLu6i8j4CJukLEQQXe3y3pdk3ynVkt7R7ttcfCZBoA", "GKEcqfeBS6rP3NUZBV8PWrmmBkPUNRpsV1CqqyqBUkAj", + "GkuEkzsEKyhgmomjiS9ZxruL9oc8tTWYQHaUv6Xa4Ch3", "GLKsDBjWBaXHkyMihjpU5ZdKyKWtUpJyE4W7PjEFSEHh", + "GLPe2gV9zG9kwJmNj9GFBrzEiqridEd5KYgeSiPrubGz", + "GMEMvNsUerrfShmMJMqZJGBs2MkUjimjD9RxJiXzkuiy", + "Gmw9GarCUcQNYnqePXNBREuLhcMUwXhQWZMAvxSUf6c2", + "GNDHwncRV2VpzXQLpGLvQUGkkfDFhA3g36Jg8ZthCNLm", "Gp96C7xbTVp9SU4YDK26zYjnzLHSuHkfcHpzXQXNNvNq", "GpowxwT8wY9x2uFLWhZtL3ELFdAMnpBxTrpqFgnEukVn", + "GQCgtjErUk8HCgayXyjuCFR15pnssi4saPnxD2Pm9oav", + "GqFWgFDHj6fgahisFk8TEngmsEdkSxbmk8ZktpgW5LaS", "GQnaJJu7h53SVVhVpg2ErkSKhYMtqYrqv1qr13MUobuq", "GqsnwvnnwfvevfovAfRu8XrJwGietC8h8t4dwyQerbfq", + "GraWXC11stqBmL86aJW8cBEncRhAFX86mX4E5pE2eKg1", + "GRJExQVWj3Hi5RoMVb5LKNfCGvocrdyXi3QY8aGQ49wN", "GRJohz4qkCV6YNjcSb3mRBYbtcTcN7GLG2cm6ffZqmW3", + "GRZ6gQmgkfYns4kqN2Unz5azpuuT6bwmanDmveRgoSgt", "GrZcGUJ7baE8r9KSmrNJAKtgYAMiD7p2YfxefkbgTng9", "GtU7wyz6vwTo7d82qNpFM6zsxWUnN7caxNMaxLwbwCEr", + "GufPaZ7pGsoqfkx4d2uUCU2E5HFeTMgx4k37YRZTn3Xn", "GuKn8nEJwUPjBfxpwyq2MXU2JNrSpj7gqnKptCZeEk7j", + "GUq7RT6MzPK5vswngQpJdJkvtUDWU9cLarM5Rec8VW8C", + "GurfodDZfiVyd889TL1g2kT3QtafH3j7QbThi52Jjvvc", + "GUYiwgZBbpoJTRYkqH5L6zL1DgHH1hDx9c7zYi78cS84", + "Gv4WPocqzi59G7sbGiVWZAtLwDohpB8xSMG4kg41g3U5", "GvB8xRCx7HFxuM675n41mSsC5adFre4Sn7CMq1V6v2MN", + "GvpfXu3ZecJRJWR6Gm2dND3DXe8CJvCtT8XHZXMsZ5Ap", + "GvRQCdtZZd7jVzwuestmUoj349jFVxL1Ja4GuxDYKB3p", + "GVS1B7hSXnUQMQWN78BWjT9Kr1h3k5UW5UTdFsDhtRGk", + "GvWoMZaf8ZbTnKKxrTGV5aGAjofinufohuqT827waAEy", "GvZQ6JUcGiw26huYVv2eDTgrgVh3rKtADPYLfBiznVda", "Gx6SwGTbYAFrUeBRMgMrgLUKaeGNeCKzkULXdEpwPSwc", "GXfJaLrWgQbiuutiLyN7ijRgBvAvunJy7bYzaV562VWP", "GXRPoAxz1gmqyNz3M21cKSJB47yAc3sJLGvenworEJn7", + "GZeAno1q7JYL493V8aED6XQzqNPjJRZMq84jtKdLhWNB", + 
"H1uSjkzRp86vK2xvC9tiYAhwbPShHUX8qPbj6RiVYWS8", "H2aCsvmJvG3tQZ58S4dTXBwmqia2GHvctgd5t2p1u2cV", "H4Dgb3KyCuYWKT8yKtp8qbY49cvaqZcisa2GDnroFsv6", "H4f9d4Ru1BV2CVDyYsTRShVWk1q99DjY62LxccREKZch", + "H5aQjanQGz5Rrh4s6g4TNHTAXDzdr4czBtEJPdeDJ1jo", + "H5FVid8iRWjQAo8Cbvc7CJJGMwtTSQgk25sknxnEKkYs", + "H5YdwNcDt6DNeAL816CP3Kn3fYXFoigMw3zaASvZc4rA", "H6djbzHAiv46Wxy3iwqD7LA8ART8YbgrWyxQPCNhnLPE", "H8ACGbhYk4UA2F2nALk4aSia6Z8Vfnk3U3SnNDdENMAh", "H8kdiUSyvHbxshcFmRqWTB1HZkQHKcQcagQ56TzLe8ib", "H8MUh74GVNbSqGrYkZviws6xCmdVS3VZF1rbhE3gSESQ", + "Has5UpRZwnb7TrhyFPDETgnGThLPe38BiMcqkvXBLsra", + "Hbw4rccbiqiQv46RaPUTpUyAkTsRKFhTsbDuSUBQVknT", + "Hd9NtFQuC7nw5v6dbijtCkgAJj7bjM3vaqqWLsMt6oAE", "HDitfpmCcy8WyJgNCQTrnZ8r71Nn7t7SjmVHnRwumGZi", "HE35aDYTJHJ6KA7kLEXvENiRBX8c5UG5xHzgeKiXyQno", "HEbMY624UhDGm1Qhy6neKSyi3bQjQ2RidSTyt7ARK8RW", "HeeEbBAkuLzqxsFLcbKUfWmeNizywy2uzAfvRg63LFT2", + "HEgDHQD4cZsVu3QEjLLE49EQmuDoojxiZZbPywgdX9dE", "HELPwwfg5W9LmXv3axe43EY1YGJjfVf3CcVjA8BZM82P", "HEVxZQExGLJbpySA7MVumiTv6qbeCzmAQJT42CthZwoX", "HfxzFiP19ymtxHagP4Zpga2zVo6ZgivpK6VkBKDowHRr", "HG2CuUSFhdkdcv31qeyR13aS6chRgCFdqZfoFfPmYn2a", + "HgF66KCFTqcs55WAK9co7o9f4ZuPuXmFQTCKvRWz9A3H", "HGHMEEHCfbVFjqB69Hu9oNW6SviukB8jUheEhYVZJKe2", + "HGkZ4BCw3mHYoo74ZwwzKJsSjnH8u5BW2poHVVMLTjuR", "HgmPwzNcY85HfrN3bYiqaypb6Nmf7ayaZEaivGY37913", + "Hgp3kh6Vv8iw5wHD86LqkW3H3JApJeW3F5XLaGXkZZW9", "HGthEYmVZTvoGptPHvgD1dPxxKMGXYSrPDcFAPSgdYYK", + "HgTXVg3dA51mGNSh9iPoeC6QLsy2cMEB8WPESRAQMBz7", "HhjxbH3vLpUNShQB34NuMCL1Qc3xoiNDbvALWrAMCCnb", + "HhVZQersDuNPpxQwhVpuXiWiahZ76QybhesPbGjbmEnf", "HiJ1pjUGoJ2G1fbrkYSNwPRrJ5ap3Q5usZMQB3pK7Zgs", "HiVDGAGPSxxydKTY6BkjuLE3CyabGKyEuMMHc1yMw5Qq", "HJ1hBuwYztBeiPhscJqQ1KKfLuhCcEKcPJQzKuZg14iw", "HJhcE1XDYTRoHaDWcfkmfGvJuDWPZLEK8YkuMw9FYpP3", + "Hjqfrrmd8s8FZzucmkFh3RGQEN7NjyzUBFAX5atwcF9h", "HjT9tCUEFWrUXFR37ahB383QTF4u53KWx9J29EWRfzdi", "Hkj4Y4QxFvyoCd2wzAswsDpwW4vD1vyC5vppVyDDhJ8F", "HKM7CfuNPKeU9crk7SWJCFgvFKvGh3VFgqT1sQVyeDDm", + "HkSTpiQR4YTP29yRBSactwZCKh3fp7NoLHLQrMK58xRE", 
"HKu753Hd2F1nWLPvcNZHX6RAGSXkg6AtywiVvRqDXxcP", + "HkXUfo7jkpymbV2LuekirjvJDzXEREB1c8hfy6cPxgLy", "HM2hzFLTd5TAhejGFjaXAm8LLjdmnj7bqQrzpRTaawdo", "HmD9baVLsNVxALt5mo3yzuH85ND625cTDuDnvVo3sf6R", + "HmEUD98iS9DFkt5dUjtrG3jDixVU53B3YPKPbGwSPrC5", "HmSU5YJr4XK2SYdF6dxNXtF9PQRzbXXupUXCVEaJZX37", "HNJTofxjz68XBQxPHoakD5rru3qnLBJRVoRKVTYxN4EL", + "Hnp981DgpWig4dBQASkdJG8r7KzrgNRvGVUo2Em4ZTAJ", + "HNX6Tba28Y4o4vfU33s5HkYefZcS8xNKY3R8zwxwor4z", "HoMBSLMokd6BUVDT4iGw21Tnxvp2G49MApewzGJr4rfe", + "HPisnDx9nRswcNBkqr6q5FSQ4saPj23GfdCAzNQcy5TY", "Hq5o5jjDysktyUQdUxFk3RqbXvjSDyzNn1XtXKexHUu2", + "Hq6Ctthk4tZ9tT62kWnpnBqJ17y6CLu6dQVaQDDjmtmC", "hQBS6cu8RHkXcCzE6N8mQxhgrtbNy4kivoRjTMzF2cA", + "HqXHSTtrUUraYZ4xcPuze9pX8LbRaV4wQGQ92Y2L26vN", + "HQZpRZLSzgDdPc21U7nCxXpK2VjMh8U4PE5G3YE9H2Y4", + "HrzzQ2MHeLXoMFFdhSCvuAV4y47VVDjVtDZSAenAoFat", "HsWUiXARLPhYitGMapLYyMdV7k27kW2xzy9Z6L77jKBC", "HSyQVMDPWiukduwnR4vS2fpuHZYWvK8LSnaY3UtrJoKa", "Hu7DW7BoXXuKbwaFJaAMEXpBv8pqBJPhfThMD96WHiJR", "HuJHVhpsf9nF4vbTWjgqgcCf2h97eFf4DnhAe3txLERo", "HUoud6qywaWj8kZwdHRTbEPkKmskHa6Md1KNvF1JQFYF", "HUrBwQeq52MqCih9c3yuixfrMWgYh1zuBZC8XNyk8TaR", + "HUtFMtq115zhNf1ecuHHhqhP3fJupC8vt1wkWevHf1Xr", "HVD6ZDBgzjqYKyDLNadSkzev3qwSUnYEs6k81JktNuom", "HVK74e6bD6cwDrK28hijNxiHZkvyXCXS8bd41tH6QwTk", "HwFvyMbGLkiTUaT66cfL1FnJ26c9VqtpqAg4UbWSXtdq", + "HWg4WYeJHENvTGZ1xyCpFUCJHk9b4nireN15tA8fHdJZ", + "HWKwGWgWpnt1HYo7kAbtpuNzi6PovGXz3oWW54GDEQWc", "HWvSRgESdWKDccWN91iRVQLN4rRyuCbuAHVWtPR1cJ1C", "HxBkCVtiYAymCCv4EYakNDSCPgog3vBJMZx54dCceSyS", "HxnjZ5Qg59nupGGXVo77idUxfsiRXPcBbBt2hw3Nt99c", + "HxsJQAfgMFVcTqf7hfLBg8UzcCq4rQJdo7g61Z4i6ExG", "HyCf5LyHfwnpnvwTQkfPWVdkqJJ2R2A8fBKb52m7cunf", "HYW69eojAvAqiPfebT3S8yUTvTDHnssZbTq1TMCm5LfP", "HZCUCLqV3P7QqG1oskLLMJW28zuckhxmRzEQ7UWaH2U4", "HzPFqFKsGRT3Yvd5Wgfng16c8q6e1bDe3W48fZbuuS9Q", + "HZX4MWsSDzRerGuV6kgtj5sGM3dcX9doaiN7qr5y9MAw", "ibwMFhkkeMTn9746FERTdb7rGuQwVcRXDbNYXB4QB8q", + "iMZEU6pbrLTCHZsaw6HkwABYvMTfn9pND9AqQgaEyuo", + "iwf4VCAm2WYfHMWkGC6p7PScW5vihfVrXs8UTnMuHuc", 
"J1Avgbk11jXQ7YfXcLiuh7zdLGwYujyGVixHLywWmSuC", + "J1RpwhRqrLGUpuwazwHn5yVuUUjQXZWC5pVRoU2YqTYx", + "J2crq7h1jqvXZiyNeTSpi6S112WjQHMmQe336qPqB3cF", + "j2p9Ccp7FBBfNzrW1dtF64aBDZzTVD874dqe5ZrZCgC", "J2VXfywh2oc5eT1LAtSApAqUVB1zJypFTYKTBdJg7BLW", "j3z8UNavf2RXACm5fTkfmkhb9SZZDBt9pyYX5ZyyZCj", + "J4FGK7xXt6E5pfxBtQhGfEX1djdgsLNhdrei4s5ghSaX", + "J5dVAuWTHSppRogVgdinqaEHgkkzzKYWdkSRZue5zpvi", "J5RKZBQ21j65Gn3cBBcFFtBaJW25Y3YEA3n1WjkKjNSE", + "J63rQazpR3qLHBz5DQLg5NB5xKDWAe3rLxGjpnJmZvmp", "J6ypmUL27j46uATh6RCC58m3yPDqbXfpE55oGsnYuSQ", "J78SNwDW6G86sMmh7djnBKGjewXNpjD74sJTjJ1iNgTH", "J7Jx6vDZNfCHUzMAXxbdjfNJY2WXcVsd5qMtVN1Pua6t", "J7v9ndmcoBuo9to2MnHegLnBkC9x3SAVbQBJo5MMJrN1", + "J8d11HHB1ttEf6wJpFdUhXvpyZefykDpAq2UDcPJsunW", + "J9Y9xwDkqFgiLypFoFmpC9MtAqkt7B9CTWrLUvyGZfKV", + "JAgqqfmUxsLzLxbuWpigwJ7Hz38vtgyzzt4eKE5K6nfX", + "JAXyzy7iCvCSTxyd2JwdTooQJAceQyj4Ajy92Nwn8Axy", + "JBtk3KGDoQXYedMDiUFDp6VXJr8MxY9tqXg6pZ8EkaT8", + "JE8vncP4cHCGhwJZQuzeBTfUVZ5tsFajkqUhcN4Ljk2f", + "JgcWmNdwrrmvJiuSo4apJCLx9MozajzKRTF5mQREiXH", + "JoeKdMCnk9rE2DkLnej7tw5repqdfCDetSLCUedhUVn", "JortrVgBnTJ764kQH5HHhv3ozUpGUDPKqWPavUmjEuv", "KdNhBD4WCm4Gd1fwi7Uf7Z3JD9KrZcnWWm8nSEZ6NEB", "kffvkDohANNa2rpj8Ti6KWZctCX3Ci6Rj1SnGHx2r63", @@ -735,17 +1337,29 @@ solana_sdk::pubkeys!( "mETnAkTMdDN41d9wSPYJWDFu7xehfoHyT5py2thcxHB", "mf1oPU95NbDVo1V4ca3QayhQg7WdS9iLWeEM88ex62B", "NHtR8X7dmwtCagm1FuuC6ngQ3wv52uJYqFvA79G47MX", + "Qfp5wD5TwLiecZZP64cn3SwfqvF7W4fzo2tjEy2c1MR", "qzCAEHbjb7AtpTPKaetY47LWNLCxFeHuFozjeXhj1k1", + "RAbGPTmaLVn1HkP38tqKYjGceejMWiyfDPEhenjH1Aw", + "rAEVgLDieWcb3N975fNsLbmpNenL2simANdvk35iLeh", + "reisen5FAkzw9iKy7RWZqyhRq5PrDVTKGAAQSB9McYg", + "ScN8WkfK7c5nmNvNh7SbFTQcNyw5poXv97h5KRFBRWL", + "SQJYmcjgo1bwJe2YxJwRDAH1JKFdrQM4AfLzuLi5TME", + "T6CVCqL6Mcea4wjRgvoZRkDSexzNP6fuNcyEdQZH21H", "TxChgiaHwnkdT18sBnSepLE5sGk7vsQ4CZnhwiHUMQw", "UdAZ7oz1WshdwyimF6e2VXiy1eSJ6UdHSRng9yRLtgY", + "UkQCtmg2gSygRMRJq3wHT8fZahqYzwRp2rHXE2hCTX1", "URnkWZGiuB7jXbfCSuNSwir1qkn7sXjiSPeLPaXys7b", + 
"uv25bCcVy1tEZo8wUjWfMZNF1eVXtT7ycwzxu2ZHBAz", + "vav8fy4UyYKf91g9uFZybjwZh1VS6hubfaKyFtbYcvT", "VbPq927r2eM2AiRcfibv9qdKtgbbpWJMCJcSSQjNWgL", "VCRrRTgSjDLHvo6UQKXy8VQbNVG2ioHNUEyS7oB7u3X", "VKoaYMffGUE92ZPve4wLCypUxoGsKYeo92F7BK1Rh61", "VTAZqz5HadKsUWyavErx3hhUeaDPerPVDssjB69hP8b", + "VzbHiqN7E7PcT1fD9B855E3kWhRpdcq2QU1VJEWrrFr", "VzZqDXSDF8KbfAQUvizj52NTjRg9cfXCAZ1cUJt5614", "X8UDqnz9DemEwsVTmKcboZERqQXe1gNHRXAmGFQsY5J", "xfCpo4ouRs5BP3WY5BdWhbr41pQxYGcXxz1sFyzPsZr", + "Xq8GX23YwT4pCroxiBS1KMLxbmBqmCRRFBb241cWsUz", "xViooFwobBZE416moj2yFV6RJwGVaBDbCQJRfCyz2FS", "YpopmpJ5ryYnLZKD7a2dEbPdPiiSLRARWVj3oAmgWLt", "YYYYW8eKkmwQFhVGUKdBAnDQPuhMTpG7zwm9nikNndC", @@ -760,36 +1374,42 @@ solana_sdk::pubkeys!( "12CUDzb3oe8RBQ4tYGqsuPsCbsVE4KWfktXRihXf8Ggq", "12oRmi8YDbqpkn326MdjwFeZ1bh3t7zVw8Nra2QK2SnR", "1EWZm7aZYxfZHbyiELXtTgN1yT2vU1HF9d8DWswX2Tp", + "21REqzFbJJb7itcv2QzkjvHuCR9PyzXV1uw9251JZq1V", "23SUe5fzmLws1M58AnGnvnUBRUKJmzCpnFQwv4M4b9Er", "245B9WFHUGuWycSXHagHXwsXGcxDkNYfxWBaeh7vAHDU", - "29rzUXiy2kYridD6zxc6nszsQpgVW8bknW9NMQEiQThi", + "25andkv98haPs8LeocDfmKwfNnpbNcHtsWoHDSBrWGYb", + "25V3EzQTGCSoLhihuH7f9jjyim1sxH6MR8MUtRLtLuoa", "2B5wMmBQkMHu9V5JbUyJuf2mJJUU286qKPsZzvQQjTNQ", + "2beSsAmYWPaNJ6GShkAECv617wmGfeH4sSuRFBYYX662", + "2BHDXBCcndLg6oYu7EvSd3TuVJjUQX5xmUiKb8fDMjRt", "2Ce1dmtdnvgDwD1MiZjP9wwGup6j8H7uXGuGMa9uZo5v", "2D2v7sMqDuq2ekZnFhaQm4k2ErWHemZQuYf5qaVTPFmg", "2dxz129YxB1xtf7Mx6HUT5JspexArNNtQt84FYueWZV7", + "2EUh4NtRwhJ69UUca8HuGEcABsn1MbvufqFUDF8XrZEe", "2Joxdac2pgGA6xqBCChWKZNKhWgnv6vkuEkGp14JFPzT", "2jS8AX38m8F9C5juToW1FmTufEbb1DfDzZJj9HSJcWwo", "2KnfYVJiAtxiSPfHYRzTsGGhUAhDTNrJXWn7n6K6giJU", "2LhJjdjNic2BcsRLN8opwd9KZSsabebQMnxj2PkU8ADQ", "2mDrrmhSzpSyaF12izGk8hnFjtKCGeCFPwQHpRiJDby2", "2mKEUVkcttHeMeaZK8jjwkuwGSdCBm23xDgzXgbhTdPe", - "2mMGsb5uy1Q4Dvezr8HK2E8SJoChcb2X7b61tJPaVHHd", "2NJZ1Ajcwtc7hZdVXTXrh2SAiAXnFkVm6MWcGjBZfPkS", + "2nZo1aiuRdTKYCfuoLR8jHQY1TxkTQxCw3d34K3uQkiy", "2oujYrRmtDDTF3b3JUgsZ34TkcyrozMjgRHBQE9R6K8i", 
"2PotfnmMDS2mdzoggBnNhNVu5NxZsQB8RaxjbDRPJhSA", "2pRZBEB1PX12cBAbgh58VfzTPV4ePJ3txXeDTM84ejbZ", "2tKR4mX7LzhjfdNsR6HfBaDDh2RM3wdpUrJqUU42aJTc", + "2U8iE8LGdAqep5y3toyrBaZF6DHP2kf3Ev2vLFggJqiW", "2Ue9zGmDnvYRrJNEjuAdNkbbickw6fKWtbeNM7T2rakg", "2us4ysyNvYJHkYi7CtRuW413Mwi34kjjFQGhZDch4DEN", "2UuVd6BMW97MRnj7mCpq8PhYmqmUYuojvDkqWstwASCm", "2VA3q6DbiLjbrLgnkiZ2fdyuRyVBkYRgqBDwA6qYiSDD", "2VAofLE2bYNM3ZPGfn6dJh2zqtnmNdD1bGqFet8mVYFN", - "2vj1Ggh29cQTCL8RGqKF3Mn1pUHd9GMUGz6VjPXfgaiH", "2xte5CBkCBEBLNviyAfvSaTkMy6tvg99Cy3XJj9EJJs2", "2zAbHUpE4MRgEwq1MWh3i9aJyzazSjUUPrmhNViqQn5W", "34t5CmGFfVkdpxQgKbAJKje1NLXa1AdWUerSjnomPgxH", "34viN9UrGJaVabrrSZDs8MnKwVt34nw2wv4Xkwk64shV", "35AidZv6XxjDg5NNPaKUdzUxv7C2Qi9k1tF9vhMgnHhR", + "3A8F9LjrMgY37qZVAhQ4ccWmrvpQ3oosXfATUtV9ozDA", "3B2mGaZoFwzAnWCoZ4EAKdps4FbYbDKQ48jo8u1XWynU", "3BKnjJPKQrC7zw7X3KtTKVtZtjLoLgzeJ9xZoAtZyVci", "3CxJofVghT3nbqtrSWo3oMQTFynpNyqtytBFawpAXi8E", @@ -807,33 +1427,47 @@ solana_sdk::pubkeys!( "3RXKQBRv7xKTQeNdLSPhCiD4QcUfxEQ12rtgUkMf5LnS", "3ScqKCyAKGN4B27S1mFNCCna4cf3ZBZf6diuXNPq8pBq", "3T2nqHFbmexvkhEo25SKnJsbaAjuyUGBjvdttczxDKgs", + "3VRzZfDDnNiSBzW259vKgtq5x1E4fzkvs5SZcVtpJEzj", "3vYPCtncFxQ1RVtSpB2HRg1udHfeVPWPpWALuJaMcLx3", "3xGDUST5CKZsiW4L5PPp9E63X3PdqRvCJfDM3q9EYcB4", "3xubywCu9F3ALaYRKgp6RVUnQZFf4npNcDZCH4Qjpmmd", + "3zaPajeDw8FxutdgFTUSVKp1cxWFqs62dtuCZNQ18TT9", "41iM1ZT5WYS8HgweopShefLJRfDD3jbB1MMJZiuEemvE", "42xkjBQvPyicSdCET8eTWwJayTBofFLGWyCiuodd1SoT", "44Bivyyp6Jv3Wm4RfJdtpCPBmbEXxvyotUoMDg91ZJPo", "452L4U6HbzT59EP9vLyPxddF99FBBZ7foCrcn5A9HLMK", "46yVA2WVP6ah5wLREZGHbGvvPaM55rcwDKDDikzn3BVV", + "48Dactjx3zuJujtBHZ5cd2w3maKFFGsKhn1sXzU59VGK", "4asJHLR6DbifuQK2MRGNx9iHujYDYkQ9pqQXsmbc7fyD", "4daH8Aotxpk68HsMvws3P5AQL3F1gVTA44jqLaB2GuGx", + "4e2KvSCgot2RGXsExfY48NdfykQSjgozV5FAXv13bUn1", + "4FozAhZhAo8ZTuzNHeAHMDDLqWmRwioWBhFqybZYHamV", "4h5muqwz35tyPQdAXkZMyVM5cnGN5oXouTZL2AFA1Fjh", "4i9CE2ewTTWwJkdMvNcia3D4yR5v4AKryae4nxeSULwL", "4JryygoiM1j324fYkeBzcQDcwRfd2WpgkEzUePFj1rJY", 
"4K5SSpWHqTbx5N5Ytjj9iWXNd5zZEZ6fkwjGA2KKafgv", + "4Kbcyn7JVPAWLRLPsNGTPmcNMvCkLTw51ZLRhqsUC6jP", "4Ko2GufbWc8hhDD9GL46He1Q1Z1s6VFfBZS4RX9yY5Rt", "4PRFPF7f9ERz9azkDFSFfgpye6yixPENCka994j8mQbj", "4QNekaDqrLmUENqkVhGCJrgHziPxkX9kridbKwunx9su", "4R8aXMwJUdE8WWtM7yE7sPFHJBfPBRd5o7ERVeQE7DKB", + "4rhWUqqNbdoVsGGw7P6Hzc81FB8C9jgR3UGF7aUgKAcY", "4tS3UZfuRHzXuPenvErtRPtnZfY1KHhT96JBCQsLzKqW", "4ULWSuaNnhQntP3DVxg1xa4yeNLNpDnAw3gTtrhPHzEA", + "4w2p2n83ter1rkk7Z6r63z3uNBKuBXzgV9KFDWBidSFQ", "4WgsjJxehCavDYXEMTBQX91KKh1szuQtzksuxZKgymq9", + "4xp7K2vkm9LnJh1aCXjJyyF6XxL1u5gmYkrVCxHU6Cnw", "4XspXDcJy3DWZsVdaXrt8pE1xhcLpXDKkhj9XyjmWWNy", "4YGgmwyqztpJeAi3pzHQ4Gf9cWrMHCjZaWeWoCK6zz6X", "4ZToBgveZ5m8NySrDyPA2fiGVRVBioaoMXD31KGidm65", + "55rEq5xznJEMEShWB9GFv5WNX5NfCZckLZbXXDyW2seb", "5cshESzkc3hmUA3qnp5ridF6WTeiVVLUYkZUaCPYfbid", + "5DhEjMqxWWAagBywA6kL7EczqTnDGvPmDfseottjyps4", "5GattKwm5oBpDJGgdL7ZZfCMthoETwrZFmQMDK5eq86a", + "5HXxjDZwm7MAZAm2aCgGcGRr3SKiwugcQymoByyd7pfv", "5hyHG7SnmJAHZmM9shuq6BXxLSG2EeosMH3ZohtaewFM", + "5iJmsXsq49ZqgP9QbP2gNQRU9C8PhwA2gbgNLdgSHbjh", + "5kMJh1BtYFrvr6EP56XjoCZmyxx3by61dEvGewYTMuxE", "5nU3v9FRnmTT7LrGKRj1reQoWWHHmPj5sfh2mTSti1uU", "5p8qKVyKthA9DUb1rwQDzjcmTkaZdwN97J3LiaEywUjd", "5Q1tTkjCtYFAtyZ4fxYYND8Ru65LLPuZSo8or9Keyq1h", @@ -843,26 +1477,43 @@ solana_sdk::pubkeys!( "5XKJwdKB2Hs7pkEXzifAysjSk6q7Rt6k5KfHwmAMPtoQ", "5ya8UPujuXN8cys1EaaqMMH3auby3HTHZki73Q4Yfkff", "63sJMcjh8r7W1oXjrNZrG4nc4UX6cQVonUHp9vGDUNdW", + "67iXZNZ4ytz3A23WueWr5B23WY7yHdESdRPGbVaPYkHw", "67joanjyAoVmb9nZLyX8p3Gx9tAxzXaUgHDe3kaUH4wf", "68EWE8SmV58oRG9JJELNDt8Y6MuWBbfraoT67KFw5Ath", "692qK4TkXnrGfDZfoSq9aFfnoE2WNW5TpMRnN7M3sumF", "69QvoqgDazuAqD4ygG6YsA31HBV31L497XqmQohF5ajH", "6BDNnr38moGRQyvx1Ehs9cM6tJDFK7LF6mUvLziyNrzW", "6cgsK8ph5tNUCiKG5WXLMZFX1CoL4jzuVouTPBwPC8fk", + "6DcjiCqt6E65w8P352JvAN7qDkdQc7YQJHBrGf5aVAze", "6GRLDLiAtx8ZjYgQgPo7UsYeJ9g1pLX5j3HK97tFmtXb", "6h9jyRgfpmgXNyaWpbDpbxbCoF56WEbzsruhMwDn2om4", "6hpMTcU76pCkSZHG1qdfw8ZrM9rFwFCpmKZWqBc5VXr3", - 
"6iSLGrFY1zCktMVkALyGqcQVqp4rUmKkozHG23EXwPwt", + "6iYUKnywYPEZt58hYs6WKA5tZjxL9MFupCxDkygX13dm", "6kDyGMHbuWekkcquroYNp8VRL5pQiUzEJ11gJ75qJsRy", + "6kfL3zErU3z9iofwdg6iM6UJDFZaVJNguAwy8SiN82db", "6M6aAEdhcWZGUeaRdc7cd81YFZ9UahgZP5D1x3MzXxmJ", + "6mdtNmZpSycAZKXKy9JkkfdEZCMcnuyuA7fzdnpUohZr", + "6npqjzgFPuL7dKdaUsgLwKokWLDe3wFyHeDaVT7Pro3d", + "6PbhWC1XRHsW3npzCHm374yE72B2mbjBrXdUAhouozqm", + "6poikjtKFzySv2zrfEJCQorTDJWmoqCLPbSXeNLHyvL3", "6pTM5pLcQFEzCnd2GxLqLm4jL97ZL8L1ThSpR5aVRfc2", + "6RFbeXzEyabceq71fMBLuBVofWedpZAu2nK9vNbZiqei", "6RmSMaZbyb7SGKLp34MXY2qkFePZh41Frs79xJgEg5R6", "6TK3XwxN4XMEM9urq1n15CQyF1Z19idts4x5Mfibhtq9", "6TkKqq15wXjqEjNg9zqTKADwuVATR9dW3rkNnsYme1ea", + "6UFmHMgP4ErpKrdjDgya1gAfHmcC6QhWK9w8wBT7iCNo", "6uHvWHjBYiMowwKUZ5waAc83x5bnLbmGfxDnXPVQYorh", + "6UvA3z3sKbxGXobTQngyL4iVX7kV9jeGYmCUGRMYZqcK", + "6vi5VzR8KURahz7WheNGRTgGWHnKJQiKkqNbfua1sJiC", "6wsSvrZPbjWeNNZ92KWtj94pdHj8v8sRbKsu1ZSpztpP", "6yf57R7U1J1VXszE7CobdYEWQNJMPRfWEgGfaRsVNk32", + "6YPgmQm44UeSEeLU4NRP4fE5HWrEoKupqtRMK72pDjbS", + "6YXp94DjBg2cP3tiVjHhgZYLP7juG62GxzU3yZFE4zoJ", + "6ZomMAu9XSu9fzkRjqM6HbAXagw6Jo9fTsfsAADTZMSp", + "6zxkTupZL736phbbktUkWQSqWTzeYKbTFoKMENoNnJW3", "72FqmsqNgpgF4XGueM7Ys8X7xd1ZqBzWvUQUq2Rv6bBv", + "72Sifwyudib2XE9nwDamapVEJr4rZLFthLtPH83ScupR", + "73A8PnqdHraWe9W5jiYzUBPEf4giyTxUG1CKZ6HkXbuz", "768DzddM8MGfosYS5JxjzpY2FcnqeNHUxMerFQHWmwKy", "776BzpbpsZU1rbCkNHizEP5r8RE8QL12Xqm49krkLPLy", "77xRWv8Z3kaQpD9K3Den7YWJ7sxsf1KTnw5MdcM7Gtnw", @@ -873,43 +1524,62 @@ solana_sdk::pubkeys!( "7DBhzr38xGJeALbZB9bpA5bnzVL8N9oGCjFfWuN6t8cG", "7ek3CDbxpGRdCVTJJpj6WPHmZrJqBCQ2RBQoqLHitx5L", "7Gjec4iDbTxLvVYNsRbZrrHdtyLByzdDJ1C5BmcMMBks", + "7jmCjQt2D5MVvSz7NhpHLCXGsh8Mu3qeBbA4LJAf3Xsd", "7N46n4N3tiX4AjVKBDPfAuxhgirAgDFknn5pkjYj3xz2", "7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2", "7nYNfGS6VVxzCZmfbLGpsXYFm2LS9XRrva9hZahFmpqh", + "7oGVeZnNWa5yDH6V6ec3Ya1xzSFykc62nXKDGhnbphuY", "7PwCuKPmGF3ZqWHgn8zXPtsWJ7Ud2q1DFggRkzctwJnJ", 
"7R3t8NAU7NDAd64KLz62tj1TaFZLmSyVSMRJX8VT8Agp", + "7VWgBZJCLtuCTGdWoiWPrVqY1TaqiZQzEqmArzGzrwGN", + "7WLzboF5Bu9e26MvLVps4rPbSHBgme4J1w2Hr9G4WW5C", "7X1kgrfYRwFd94yyE8tvsQF5aTFHdmoLXnG4q79onDAg", "7XSCAfoJ11zrQxonjbGZHLUL8tqpF7yhkxiieLds9mdH", + "7ySQKwtxuvxQkhW8rKsa4Ch83tdX5fzfpd5hu6Av66ur", + "7zPjirsZ2GLwji87WB9EPyeLXTzigjeVAxH14nB1Ss2E", "81JyjgUyLbqs2f7iSvrEBwE7Grj3MsQwULMGTxXKnLvS", "81zNHWkYaQRL3TWHSNwzX6T64yUyVQ7qXzLLxvaaB6b3", "86fVAWdCDyrSHnv9y9dQCrfQCY5CrXptLP7gL5PyUDxR", + "8c95uL2WKofQGfr3rAMZHmJ4V1UJW5puTmpw96n47MX", "8CzsSMYcmtFmcGpYYv98kcoQQaWQLVtpQRUzEw6awPHU", "8D5rQbJD9qLNJm9HyTjFWV93CBc29ozAdGUia4hyMhw", "8DXdM93UpEfqXezv1QTPhuA7Rci8MZujhsXQHoAsx5cN", "8ebFZA8NPLBZD91CwsG1HWQsa2B5Ludgdyf5Hi3sYhhs", "8Ey5FDayWYgJdVoquScT2hJDKWk7nQQfqzfGBt1emJpx", + "8F75kvPrr9SVFDDbyNJQ9J8sxpEXDNh481McKJ1YzQB2", "8gcMmaEAXfRZKcXHeHV2x8R1ebb78inTr9xhhEuNNoTt", "8GLRbAstsabZuZUx73AoyfGi1FRCWSUhRgMugFyofEz7", - "8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd", + "8ne4sDgaaMtuwY1y4BaMYSGKXpDRVc1Po9PsJQvrZPMs", "8o59jbS8MEqSd1J72ryL7dghg9djh2BAUtpkZvTTYh4H", "8pyp3vfVPRziYdAYEyqkwytdBbdVbQmHqfQAVDcRV3w", + "8rVubWZEYjy4YfMvUr5CDPg6YgdFdzUfZtfw84cxGjaZ", "8sv24V7cy7tyFPwn8nqAKXiigeNmgYACoFmG2z2XBTZw", + "8tZuYRKd5kx4tNsZMZHA9ovsjKkQWf56kfe7ycLees9B", + "8VAyNMaz9DFLNxSFrhjiusKCcgQNKgnByiG6KfeCP8vE", + "8vnrJNMDERZRwWgUMSqwLyYHEPkQQg3Ww6BM9AH4uX5R", "8W6wYH4cQKroY9ymP38DQeC1wV8b92xb4BWT2foXSvGy", "8waGfnuaHHUgZT19xQo8LWsAY2CSrXiUMeMDUUrvkeu3", "8X4SDANraPWJYCDB1ZYCucraB1TKki7vTFcJMiSrspTY", + "8ynefSJ7MVkC6VnMdM9Cm3V1F57fiYg8ugkfNV95X8Zx", "914RFshndUeZaNPjf8UWDCyo49ahQ1XQ2w9BnEMwpHKF", + "93P67u58qai4kT5XoKX4Ti4g4H8nrgJ2PKnKgCuR39S4", "94HgFvsD8zm7UXJ5KJxN4zW5nsdhZWY2LFREjfaFEHwR", + "97MtLX5ajrR319PH8iLnctBpaLFoT3TNuUAtZfZaEn7U", + "98vNehxpVCPz1t5RjnZZVjxQ549a2VLUbkpNwqj1rSb8", "9bLEjf8SjumDs28WqUfoSnyh9Eg2VwQZGMrxxrWhvLWo", "9bxGPEvFjGHqpAHMkm97R5d8euFnpJ3ws83tMkTbcBUJ", "9CqDvvGvSNVZDo5RskCAv4fTubpFCs9RLTrjUxEYrvNA", "9cYyjirWYs68YKw39r83qXv3rQB52dyviteRDKrDX2GC", 
"9e7XGRqQqEvppx4Lkj6P1S7k65yWQpf3vcNzWecKSzDd", + "9FXD1NXrK6xFU8i4gLAgjj2iMEWTqJhSuQN8tQuDfm2e", "9gFxqsXbFyrKXUkqpAatonn47uYZ7sEZSnMxhzQoXrUJ", "9gJT69qJUibNmfnAPBgsqSmLqVxc3kxXpw1Vk7APDrso", - "9H61MfefWLuCr8aoUTrqLN94ZZnu68pEqMHhnYxFSB4y", + "9iZV3mMdBh9y17hM3eVXUVuH28pGLERSJBq2z64yfreM", "9JJQN1WpJ8QvH6XK1xAMbSpgHSiwqBgWaeCh3ViEFmtN", + "9jQ16ZjBN1vmXn3dpRDc5eZkRPU1kUTp5MPnJuSFWb1U", "9JZXYs5TisLa6gra2PDuNibbi2979AZuXNY9zSVGz4UA", "9mfDCev5UYzN15XS2AyRPXbcrDJ3HL1jkXj3mdP96Kam", + "9mo138RC4NbMYvWaVUE2cYPpY7KCCNGvgHhi4cVuCyM6", "9Mo3ap3jpuqQpLi75EsiXLWfTr1cbBhrJNumoq1wnVp6", "9nEBir5mYz5yAnAtuZsuBxwfC7SayWZTMnyCmmTsUknf", "9NEsaMNVEEPHCdgxUzeziZdgx85LS5za9LVs1rQDGBdC", @@ -917,74 +1587,100 @@ solana_sdk::pubkeys!( "9of5uVqC4EYZ4LLJzPecynNM4GsQWiT1WtHfA39VqcCR", "9ppJrpsbbuGNjiMhhD52Ueco4KXUzVfrtNQ6tAcDab4f", "9q16BB7WGmBxf1nJTdxH5zPnBUhtHqdqXqRFjSjuM4k7", + "9qbqAdFqJK4MhmZPQjbRUxRUi2NTpevAiZ8wj7SiCiTQ", "9QEY6sXPc88gt1jNEYUC7YWceWV8nRuuUi9WYrDngro4", + "9QsFvmFWH2weMKYivw31wy6qCztnSEAgbvgAA8gMoUWH", "9rVx9wo6d3Akaq9YBw4sFuwc9oFGtzs8GsTfkHE7EH6t", + "9SUf5VUSACNWW5CgbLs26hfezhUgeR5pNuTB1ZmUFxVg", "9uAtx66puPQZLaP6fMKNKKyRtGzonhaUEdbVopgeWcfS", - "9uEazQxpRTyYX1hHMMySDBWad7zb54K9PkHKeZemK2m7", + "9Ukj3PkyD3igEDJGt1QTj9ThzjK6hMiadQfa3gm7kjf1", + "9UsSQ9m5QemVf9NFmKncDDXGFGzUXMiFay9Fv7rUmjtV", "9UZ3NXk9jALnrhbWz91PXWya4Hvi94jceq5nojxHiKgL", "9vpYXvRdqNJD2YKRZ9q6Xm7fh4FdPGuc5PBZSusv8vbi", "9WuDnZMufE2nsuWsTnHjJsjavuKtswTpCy3i4A5kKT8c", - "9xg5FUCSJvvFkUF2dLoSHAR72DnA3qQqzTzWhHibZ33c", + "9XQyJ5emdYVDB3PjFDCmXFG3G4cAbPHJttF2hJczhZB8", + "9xsgKAU3pyKZZddPXdh5wLRqjdw2Fc93BL41JszhEpZz", "9XV114Dg7cQF5zbxEoEnTpuYXR9TgdjHSimZtkzh4epF", "9YDu59tKDysB9zT9P8cgvaogmuy64R1Nxbr57QHztNrW", + "9YGn24qD8ZCU1EKZEsPLSEv7SVAJ4kpbCTmj7fR5TXYv", "9yWfnPBofnWjXRvfEKLioMb4VrCkwqcYBbWj1uF9P6X8", "9z7sdEnttp9T9bzoRZumMcKWCU76RdmrFPi42km7Twb8", "a1phaKk6UbG1P2ZCpfMVFUeRM5E2EZhGvUjqWHRsrip", "A31PGH4i5xGn7SHWpsQRhpBYUwanRuqNrHBp8bSeCSEr", + "A5dCbrjh492SU7fg31wZd6CNmz2NYqH1gEE219qyXZbe", 
"A79u1awz7CqnxmNYEVtzWwSzup3eKPNW6w2Jrd56oZ3y", "AaDBW2BYPC1cpCM6bYf5hN9MCNFz79fMHbK7VLXwrW5x", "ABC1U4cf9DZMwqy8ktEr4WJj8VHmVBQibbC57gEJthwY", + "AcHVKQ3LoPUN51Zx2zyA2CeSZJUR2zJ5jsghTpWFgmHf", + "AEx4pxCHFpsVgZ2X9t38bt4CouZ85tXXcghjUkb6ypv7", "AfDqsQKrpuDEHy65fsHMzVVeh4FcsRomxoe5nDLPERE8", "AFNiQnEvAqjFJizDtSAh5ypdXvQDRxHN61kcect5wNEx", "AgG8obWYeVY6nSkPYqDHXfssxdcG68GkuBikkearYRv1", + "AGsrW2sp9mqNp7g9Nqw6g1A9wMyDYysqTN6d8oaw2W4s", "Agzs33CozCnr3eFvyHcYkb46nUZq4dL9SFMV9CNAxfGR", "AHg5MDTTPKvfCxYy8Zb3NpRYG7ixsx2uTT1MUs7DwzEu", "AhL6bJ2BF2qWSP9aE3VnFZFm9DXxWdfc4PjFBKPncsrV", "AJ6PKuAJmACTpB4tLPm61u5ScygG4c7EtU5HZmCQ7MDA", + "ALiZQ5XboU6qTGKSP7LExokaZcPGVYvmdFqqEw2bCJWp", "ALp2GdA1eJV8vZHMHazCtTxNXe3BLUSco9LDASgjDs8R", "AmhQFcGvH2hjkucP78rn6GMKSbstYwyFpCDVKZUwBGrG", + "Anara9qw9KCKMFPX19GUE2drgwiw49u2pQhHXwVRukhn", "AoUwfPuiEek2thVRDhMP7HbQb9rguyab4rDiz2NAfwwA", "AqGAaaACTDNGrVNVoiyCGiMZe8pcM1YjGUcUdVwgUtud", - "ArmanP3SBD1DVSoKCqK7d6S2kLpyncYey2Ybf3o5XkTn", + "ArA1pRBW7WFcx1StiRpwhktiXkdQTdoGSj519hzr8hdJ", "Arvv3uwEyDPKckw3wEFiCq9hgxMw8kawFU6doxFAWGYR", "ateamuvZX4iy2xYrNrMyGgwtTpWFiurNzHpmJMZwjar", + "ATwrKSHdF1JptZDFd2wUeU8reP6ftp9P695W9ipdB6TQ", "AU8CBLxah7B3HQE4rtjfkRDmqEGLqu1xKUPLSytQ8tLc", "AUa3iN7h4c3oSrtP5pmbRcXJv8QSo4HGHPqXT4WnHDnp", + "AUgbNHRcxPqfmq3CXQ5bvy3BpN8KZt5LtXBGSH3gV5B5", + "AvAivvbUcs2ewGpj2TxW39BXmA5t6d9ofQZFzwagqAzb", "AVUsR5RuxBcZDftuUNQqwUKwx9puBE5N1zmgLQZSxiuH", - "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", "AWHfhBSMN7rX7NEDrKNfjnUyR7W1xpfistTy6w9GqT6U", "AXXrLQ7bVy92pPxW7heQkAHfg8LuJ4uufUWCQgH12rmR", - "AyYqAhyCCxRrNGbm3dY4aGY9SyaQC9UvPTSHvMQK4YW2", "Azg1hC9mg7pkSq6Q5sAESsBsrxHoUuyRnUYSCJMJMdup", "B3SBGpUurKfrMAh3vykyueokU9vE7EiGDkvGFzxokQoa", + "B5GABybkqGaxxFE6bcN6TEejDF2tuz6yREciLhvvKyMp", "BAHNFqttxYMMZUDqMm1JQPnxkZoHLnpash1ouM7pnLEA", "Bb4BP3EvsPyBuqSAABx7KmYAp3mRqAZUYN1vChWsbjDc", + "BcMg5hjgLZF2mCJ6L2ah1p7TxHtH1euPWP1kZs3USRyC", "BFMufPp4wW276nFzB7FVHgtY8FTahzn53kxxJaNpPGu6", + "BfVzQge1z9dSZ2YiGyRXoPRUvowe3S2kvbLU61zXn1cV", 
"BGBxrNWZ9HDBQoXA9Zq6t4auRXa59qrLQkiTGtZK6FU9", "BJafMGt4t8A9BENBg9EcXEAqUSgBLaQZujQqvrsGMgtL", "BJevND8rrW5f9AHSysSGTtDWKcARxSpzTyDAvCm26Qms", "BLPBUAuWmjTKT8rF6nXnHJCcuK4ZonH4xdYSD1gZiET9", "BPX9WVx1MjVpSWY9fo3hGXfE74yKXcreS2XYKjpiyJfZ", + "BR9JfwDSvtP87kdJxeF5QJPCcj4bdmUNVDPtsK64DCkR", + "BsvtXMu1eGKrAhpP636EnNG8LWddxqmCDq8zcEG8CwY3", "BT9ZFvsDfX6WpLFqmWEYuLuE5i3SxzdSJ1Vzm9arbRub", "BvXzmS5rLW89nHD7K2qVjcERKigDdJ8Xhc74MQZumj9J", "Bx6R7GBNsFCt7KJUZw5yrbvdhL1EYcXPhkUBnS2t3c7E", "BxN7s13iMFRCZEumBRzUpH9E25V77dL7VgSig18MYKbm", "BZBKHmW1DhBaAPojxWBQ26vGz42Y7MtNviFZWpc6nGLb", "BzF3s3AGgYupiL5Mhbzr1tu1ySMZSR3SXU4DajV1Zwv9", + "C219S526JWRzpPUm3FfAzobwZMDuo3DM63phtJpqAz75", "C5oTsyGA1gBvgWpvS6EaWGAgEKDBkMpQaPeLTiaLuYKi", "CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S", "cami5ixFFZD3jLdX8Ef5tu8o21reSGoE3GpGRrQyP4z", - "CAo1dCGYrB6NhHh5xb1cGjUiu86iyCfMTENxgHumSve4", "CBUGET5PnvLc3HvEeFYj64iTvdKhYV6pujTPDdDh785K", - "Certusm1sa411sMpV9FPqU5dXAYhmmhygvxJ23S6hJ24", + "CcTtRsmLJEjqsv5iyfXSYwjaUJdfrRK7AU9cHMnQfTb3", + "CfBJ2yK6hbRD3mV5VoMux6nkki3mYa3NCmWWnfTeKw4B", "CGoT67sSerk9ckwin6yY4mna3ymLxxKNqdYeQZ9xNdkD", "CgwKP4kM6B9giquCADDXQxik7BJR5L7m2EC2ntPnQoJy", - "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", - "CJd4hGJaZ83WYnFu72EbwJoEXiTx57wdDkTdpS5vvJvK", + "CgX9oSHcpXjKKxUpLbrgNKWrk3ff4cQidfsf96AkMBke", + "Ch2UBdfwRY8UyAKCBzYksu7QYwjCXprkbUo7AY9CSRyS", "CjmXSapt1ouz3CZzgkRJckBEwMSo5fVdVrizLeRscwYD", + "CLarKkpJDBiJYLXqKLVEL8VJaAxRHnWRyjjxAunCFJ41", + "CmoYHsxUDcReka49CbnJqoQ3y9hHYkCoTBZ59jA6k6LF", + "Cn2GFnp4H4mejHtR4GbV6ga48qoceZefzxNU68PxCcK8", + "CouDacpBtF6mmN5K9aXyVds1XRkmo6J4t8ebFZxAecGh", "CRextgJEiudn3vzHZauno3a8BpfdweYEdLEjhpZ8C7Cm", "CsKpHvEjoSkdNduEjb7mfuJbzRKqc5ypde4yUW4iGMS1", "CsqwHrSUAWsWpN5kUcjcv4Nsq5JtgFiDJTckaY1zuXPf", + "Cu4M3yd2LfMoGhmYxKszhVH18SPgt6TQvqnE4AWjNKwd", + "Cu9Ls6dsTL6cxFHZdStHwVSh1uy2ynXz8qPJMS5FRq86", + "CUPryNhYfF8ChYyz3tahM84ppX5WSpvRxWqP7PEHMX36", "CVAAQGA8GBzKi4kLdmpDuJnpkSik6PMWSvRk3RDds9K8", 
"CVRr5oHCAAooVbYze7CvXtRp4FUtkMCSqBZU7MVu8v8e", "CwhdMezLucz7bcuWzStpLXgrzKGC2tBBiaVmJZjfprRN", @@ -995,23 +1691,26 @@ solana_sdk::pubkeys!( "D1KH1UwfTLaBW3NubpvNv4ze9S7SKW6jd5TLdrwZgLP1", "D2RV1q6FgePVVjrMa7AMzVbvvAeg5oS7TAV7qdNKSDsX", "D2UwKTrNJGNLM1h66qjoSDTK6CLcPSzmga84MFuiWdiS", + "D2Wa6JtXeyqFMdoacpKMo86Pbr4YpfdVCtAhem8HjqfF", "D32cBNvo9qmMyMSJzWqDPQ3ujYFuW9HHNjVkwxspezQr", "D6pUrfgc5ZyXSfgtCBYozydRSz92pse1S7AZP58muEYk", "DAHJgPKdmncYW8DmY6meaU953a7SktQ7eDGtWduC8W8m", "DAS6zKbzVe5DFSwyEgo1TisuWt31HjBdZxzgBF2ASiju", "DdgotKX6oyHpadPbAuiyAodHh3mk72SXBL4aeg4vLzZc", - "DDnAqxJVFo2GVTujibHt5cjevHMSE9bo8HJaydHoshdp", "DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ", "DE37cgN2bGR26a1yQPPY42CozC1wXwXnTXDyyURHRsm7", "DeCcxJU8AqxPRZ27wrKYKRgmNHwCUK7MvW5XPGWh8WZF", "Dfc73Czi5K3xa6yKMq98LHJx69PDsSzUvSdCSSqr8NDQ", + "DFKj3e6WeQmLxQkRBHq7nqcEXjwkcfJGCt22VySeDiKx", "Dgwcrprgu1WeCNEVG2stkiABqWjfSFfBuXeMrR7qwJHp", "DiFeTctQSaNczJNmZ5121kYqLaBe9wDpM9sjCzTELJLE", "Diman2GphWLwECE3swjrAEAJniezpYLxK1edUydiDZau", "DJHsoHQvqYjb8G2Ni6XSbBSHxmycMAsZksRDytQ2bntK", + "DkycekX1rxCUr9FtBHfacHEgCTfRdRLaKYTiz7fXHMQ7", + "DL6URBwusvYUFwEYZK4VEaoaynWSduUA9e4N7WDuZawf", + "DM52rUKVkCPDY3MFmSL76XAuhB5ZN7MuogAiQtEe1Uvn", "DNWsTLvsMixgUcbM93437U8DWmJ9bZikQeBxQLHbeH5L", "DohaxzeUj6ma9shCykxGxi7FbWnMyW9hzNjwQjZHEDV7", - "Dokia75SVtetShgapUBoVFfYjL99fQyr1twxKKyTZKa3", "Dq3piY2ZcBvNN84j2EhDLtTzRAw95za7Eau89pNcmSd5", "DqBvkYXi7HjdaKz78yakiDsaGuq1BKrQi3Z5JV6STctz", "DSV16KKYW34qjxTBcTzURTyJYgw7qLq4VLKq3UavQKCk", @@ -1020,60 +1719,85 @@ solana_sdk::pubkeys!( "DViARWAWKkxAzp4UCgbw5B9pLSrBY3PaztFErcwgVUKX", "E1SFkvPjU31xWMcvgnX6vhGvfRvb1zXvHFkqmQNEGZKK", "E6cyDdEH8fiyCTusmWcZVhapAvvp2LK24zMLg4KrrAkt", + "E8swCwkKkQ6HTMTA6e1ehoZxDKGkXqAM2o1TYgxNCX5d", "E96CSRSUT1WUGKzDb2Dug3zWRgj6qKffNTUZUihjFc7g", + "E9hD3ikumJx1GVswDjnpCt6Uu4WG5mz1PDWCqdE5uhmo", "ECd2JogmL8Qr4hox1zUNEn8aHFchE1tAh3JXM4UQBzzo", "Ecs6YWKzgZXu3oS4WCyWzWf4kw2g7XnE7eWwq83TQBaG", + "ECULsxDc7pqadpQSQxFaNWAsemLW3wQq1vDKXg5P7GjW", 
"EEN4pf92jyVoASZ6pQQMHcKXTF4d5T3cY1a942QhRasc", + "EEP7VNrtLt3tjM8DvqUVEsmwZxLKwHgEFWDtoMx2PQZB", "Eik59s9go24w5KoyXGoyKSK1rUNYN2MLTv1iW2DFWkS3", + "EJ38tGihX9LtQ3q2eYGxUvLm6pzx8w6rXG2Q8BNhbeiM", "EjcGVYigv2PA6MeZxRmgFot1P7eGQThFMS2Yh3cj5r2n", "EKyzkF3pdGDR3Wnhuf3ouMoofnJi3r2XKioUuGqwrqB1", "EqxCV4fz2unNzt8ydGrVyz24ngkH5n13x2wDSJ8DY6qi", "EssmJHKwUffWWDBK4mb84QRQjWJEnuDui41T8ufcoqeZ", + "EUVBn58XXTX9RBTm1R7Wd8n8JkvBMQfc9uSn5wPhbdBL", "Ev8D9dwYdfebkdLgAjwiJtCkqS882Uvrit5qN6NTeHMy", - "EvnRmnMrd69kFdbLMxWkTn1icZ7DCceRhvmb2SJXqDo4", + "EVkRg5yjkmBP5tAmSiM9zBcrfnFE82rv3fdVvTLR2sxL", + "EwHxEEjLrrhzB2TUNMEJh8kNcDSnfBYvuZzDizBHF51J", "EwUVzgSPe1zy2hfUGZxJAEP7Y1wheNgNsgratbzPELru", "EXCMwETx5Txcvxt6YYqxFmhSpQKH5BVjdat3NE5eJJ6a", "ExnG6VdMubFJfrB7qereo3YxUMyeZvTuZUVKfv7R1YZD", "ExyEA6EegthLVNEjkj37FDGUnokPHqtEEe58ncBAww7u", + "EyhATWGrsfmfRZtUpCDiyW8vH7CfkT5gy15RtvNPmqby", "EZUyrtZoBWMMoXTLzDo4RVTXi5XKYCs1kq7oESFLDvCP", - "F3LudCbGqu4DMqjduLq5WE2g3USYcjmVK3WR8KeNBhWz", "F84kSGMUy81sHVF7HtZn8nPMRBau2ZC9uqGAZGJtHXYJ", + "f8e1k1Qz7zsv9gp1kVPnzBHCV8kDkfM8Rad3ZfHxFZN", "Fa17nmHFt62kmerRQNGtgVWDxnuf7UD3PY2eeFfhpz2t", "FAAvB4WnbSPNT35oUXDvkYj932KFRPi24dNBtrSpPvCY", "FaGBP8LJrXE5h3cFRtPxz8x8695LQAYnvYn3NGtVtvrw", "FAwRZwJgi7h81ZphYhLauZKvBHvkr3Dbhh6R8DsaD4Xv", "FCQTtjeFK7Fj64aVDwdahfyxiG94uLnUjkmmXUq2esuf", + "FcWJ9zuq23C74KzeZtrSZMNXDnKFN9fQXwAyVDTZFpLr", "FCWkGAHDWK41ANjiaoPudkCZRkvTecaEkoZQugezUnpr", "Fd7btgySsrjuo25CJCj7oE7VPMyezDhnx7pZkj2v69Nk", - "Fd7btgySsrjuo25CJCj7oE7VPMyezDhnx7pZkj2v69Nk", + "FdmF6aFAy3pisaTwcQnvLf43z3pd1QnqrNzFMa2vKABA", "FEpYb3oJbdPf77DPdQagkcmcJ4SqcfRDeCydKVkks4HK", "FGiEdzde7Fco2WLpNQMat299hUVoykJdaA5hxdmCzHiS", "fishfishrD9BwrQQiAcG6YeYZVUYVJf3tb9QGQPMJqF", - "FopTvQaGp6K5FadWKZtsLJmrX7gnNGFS2fQ7rv5KHyE1", + "FoigPJ6kL6Gth5Er6t9d1Nkh96Skadqw63Ciyjxc1f8H", "forb5u56XgvzxiKfRt4FVNFQKJrd2LWAfNCsCqL6P7q", + "FPgkPgT5agEv7JY7czn1t5bKKewAkcBiPn1xQyiCcurc", "FshmkeZbT8Q7NveSjVpy8FZBgJ6sigK8Etz1cX7Jmyu5", + "FtyZ7aQneFyJs6wKsMTt6m9JFjYEkHwZ2DhhtS6gw3Th", 
"Fudp7uPDYNYQRxoq1Q4JiwJnzyxhVz37bGqRki3PBzS", + "FuSZq1Xkvkjkj16fGdhDtZb5ATtsZ8FH5a9KGsDhtZQP", + "FUyx2W6wDt7u363QgQRWQYuytE5uJWZLGJpuVh3RDiCa", "FvBrooVoGfi56ux9L49HZLDHMt4iznYVDAMtyH2ee2LD", + "FVmo2S1GZBRJLS5GM2F5uYF6bcg4rM47dtkxHABKES1b", "FVsjR8faKFZSisBatLNVo5bSH1jvHz3JvneVbyVTiV9K", "FvTY9S6vutvfyP1eJG7RLiR1er6nk6cNTSP8q9WRpvWA", "Fw3khiQmpKLSdNyoVR52WxbTWT9gjFSXFsZewknwLGb5", + "FxE69xVkPAUYh3Y2QCHJVWwVB8x1F3wbHnfKGoUvXn81", "FZktwGYwu2JfbECgs4ELaXXJtz9oZyJEfiWa234is6By", + "G1X8Jrrfz3gfqZQo29jjwZ844XwjdPPEBaqNDWoAfEA", + "G2ccHPYPic368N1b6dHJBnE3tBmiceHGZefjk2FZuVfT", "G2TBEh2ahNGS9tGnuBNyDduNjyfUtGhMcssgRb8b6KfH", + "G4BaMbWsp9uX7EX7dtv848xqfxtmoi6d44cNk4irEtvZ", "G4dd6rLMW5aQEKFcYFHw148T8afjyavMyabsc86zACmq", "G5rEbYLuVsTiXP1hUWkeJp9PptSujTGa3mFRqrxXgMx4", - "G7gAgJpRHnRvFhrUMA5khWMqHJ3tpWVWdpsBvCq6w6MY", "GAvEvUKfVkGjMgFhQjC6WAtzzM6YbHGvfC4PoCtrrfVz", - "GbtVg3D6bNFSjem21vrJBJTpUniwwEtmvs8mQkX5XS1V", + "GbpomwGXpPBfYpiMz6KWXBa23usJM6Cf426NQGzAKV2W", "GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ", "Gdo6FaCtTQqGYsmDQrX2icSZeqDCdVizGzBDNbiqCGbJ", "Ge8M91yymAsZmjCyeAVCpxGA9k62CR8cxf4AhP1c38UP", + "GejR6mt9gxHtJ8Xe3cqamG7AL3Hx74w226o8p61QA5oG", "GfCNi6GaCfEKyvpS8fs1D6VUdQcPJg4VrULvFFEBLFW6", "GhuhvJcGcS4USKAt4JQsiYyZvcAj1MNzBhRiqR87wNv8", "GinTsSooU4QChoqXFvEWnMLsM6iheLmLFFv681s9CeqY", "GKu2xfGZopa8C9K11wduQWgP4W4H7EEcaNdsUb7mxhyr", + "GLh2T3KmWH4dUxGEVFuExffK4TLotmfeSCGg6fP7g78z", "GLmkRrNqTSVi9ykTBQVX9M8cV9EwiCABknXVweS4QVxj", + "GmZ7xFQ4GHDbQw4CSnFT9pAHa15u35qTN8e259UrBh9D", "GosJ8GHbSUunTQPY5xEyjhY2Eg5a9qSuPhNC4Ctztr7y", "GRmtMtAeSL8HgX1p815ATQjaYU4Sk7XCP21i4yoFd3KS", + "GSXEn8TVBgiqfiLP5c8VmUrdAX8sWDZVE9M6p2kZFehG", + "GUFNuRw9JEAQwrJR71mRa2LbMRyrUfziYUzqY3KQwAXv", + "GUvRSvhhQRA1PhTpMaqW5hELHcPP9QP4W45tHFEbtqRi", + "GVjtyyy9HrgBmQ62TxjyUuaQDvuHNbnnrZgi9gqwpVYv", + "GWccEztr4AFmiMvvvXcib4feDZXCi6U13XrxponUHFWC", "GwHH8ciFhR8vejWCqmg8FWZUCNtubPY2esALvy5tBvji", "GwnQsVbbVsMhGqWV3gcVCF1364LRmftggyc5SmsYMLrY", 
"Gyxhb2GGcB3s5h1HcHekRJYyAHRXEDP1FfX5fncE9edA", @@ -1084,52 +1808,58 @@ solana_sdk::pubkeys!( "H5FD5GxuE46hqi5paMkcqt9WeHvszZA7RgBfMoMpfNDK", "H7tpxumDErX9DXNTKKBFTDmynXBF32QQ8n3QjiQfLn2i", "HahmUFR44BXFP7fVLsnd4pyaE7GoN1KKV1hdL2eVUpok", - "HavuVVDXXsJqMzPwQ4KcF5kFm2xqjbChhyi1bgGeCQif", "heoSbe83UDZA5LNR6F33QHVk9iZXpxPKcNRtVpDczXL", "Hj2jzpAp57KyM3SmnYwJbDVrQ8tTWizMon2hhzYzwxet", - "HJFFKSJxhoeRCoe3xJPyCaS6sGCnknXcLfH3Vg7fBh2M", "Hmube7PkFmvattfygQgEyWe4GfZ9Dskj4Zc9YFfRj9JU", "HoXANZnWTGeePertqWkMEnnhgXjTjzpfWaT2kja2ZgVU", "Hp9NMFfFhDQJCkgUHLajoWqTjm1xxdvQEyUnoYn1GWFX", "HPca8Y8aMnkrTi87MJFHDvMtJVkvyMxFvkuLsoFEoELy", + "HPdVgk7NhewBTKDCyqKYFXLu3pErnUo5W9KLSLd18JJv", "HPN8ANgpeF5mcViFW6fTCXGWPdQ6LhRWvcx86JVEkFC2", "HQATLHBJcU7DET6b6NWNMpfsZVZ1PRnbfkCtcFCT5reo", "Hqc2qT3vXvBSKozmz7Rd7gLF6jUJHuQEGBTnCHzEBnqk", "HS8DF8wP3A8qSHPnzbE6pSvXub4FeZQ6xMNSJuqArAHS", + "Hsb14Gkhjbp6FinDcPRwrThS8BYsTnrhGY8Ui4f3EY7V", "HsGWhcLqVsue45i4vMxku3YEVGF5cYxUSTaa6gwieSGN", - "Hu5owBhvDBeNKKp231TzCxLyHpWSa3ikTv7nWkpze83Y", + "HtzxUabNfYNJR43FUmcpkgmtANZahbq5iASB5oiboXzF", "HU8uKcH8b2GRD5T2LYKDzV82H7CiRLTQLwYTNxAD7b62", "HVWzcQZ12pvZN726XXgA2jBfDuj6wDFwSFVdzxooi7qx", + "HW4zorvt6xDwhU36RqjcWNwU8YMj9tiqnAafBKW4cqV", "HW7ntfUHapD5o7McDuPfGvkfzrPcmuPSbZMMoe2gksKQ", "HwdfNWCqP2vXRvaHqQhoVUM2uPndaY8DDJzzBxCoPNHU", "HzrEstnLfzsijhaD6z5frkSE2vWZEH5EUfn3bU9swo1f", "HzvGtvXFzMeJwNYcUu5pw8yyRxF2tLEvDSSFsAEBcBK2", - "irKsY8c3sQur1XaYuQ811hzsEQJ5Hq3Yu3AAoXYnp8W", + "HZzEML2w5Qs6cccMd7o57Vx2sQUuuTaiecEcMU2xceSK", + "J2jV3gQsvX2htBXHeNStAVvMJaPe3RgNotwfav9pyS6y", "J5MroSHMzcPUTXcnpXygeCFnXJ39NBG5RRQRYD29ZwNy", + "J711fLahrgkEnPaCrbGUCpGemmeVg2LsaZhmC6HNPbXk", "J78SNwDW6G86sMmh7djnBKGjewXNpjD74sJTjJ1iNgTH", - "J7v9ndmcoBuo9to2MnHegLnBkC9x3SAVbQBJo5MMJrN1", + "J7jT9mGjhfXdGFdp9XT97Sd8ynFMe5e2L8DqXJQr8qgs", "JCmdhNCyzypryaQUGLGxbCM366dScTD4tVy5ooSWyaBZ", "JCZNdppKyur55JqY5VBctkVjxEcgoQq3RmBuVbQxSCEq", "JD6R8sK3cu7tphNMyhzxSGnSf9DcFUacXBgqycvGAjai", + "JdJWLp5edqUHQB7K9mA3L1gFbFcrS8aeaar6hQ6M2Su", 
"JDScHrbfa4DEmqA4j5JTVc3wtwqXXeDCrjntRo2pWoLP", "JokerEfTSznB2aTmowy4QPqjyajLMuYM6Jd4TDnKPNc", "LunaowJnt875WWoqDkhHhE93SNYHa6tfFNVn1rqc57c", "MCFmmmXdzTKjBEoMggi8JGFJmd856uYSowuH2sCU5kx", "N55CrumveNQrXFn9oL4xkFDB9K8q5PxwrYaAomrxemz", "narPxmKTwkUxvcXhueccHT8xbE8og2Vb7NrLBm8kcrh", - "nCN1wrDwLYg3zBZ8DbDt6dDHJAtWk6Ms1VK5neZ1fQt", + "nSGZ3tv2UhskkPqiB666yDVj7PTi9qKgDqvjHyw5JgM", "nVmiYamBpwzEqxykaGBWvY9W4R7rmK1JudonPRhmkAw", + "Pidptxt5SEqgeK9HgqacrY2KEEnMqtUFSmAucLwnTPG", + "PR3GtaLUjL8rSGcAQtV9NYGTWhpys5kYRDUSCMF5N4j", "pZ2nxiW1M78Ez9Tk5DcQyw1nWpmPFVZk8KDGKWbvvUV", "RBFiUqjYuy4mupzZaU96ctXJBy23sRBRsL3KivDAsFM", "rusx3KV69WGvsEbWa2HxjXp9GfHpjojM94BqsnfxKhx", "SFundNVpuWk89g211WKUZGkuu4BsKSp7PbnmRsPZLos", + "siriXy5CcarNiz4XL8ssBQGiy2PwReVLny3Bcxq6Ymb", + "SoLiDDAQBEK1LK8apZcZqY7FunqeqDY6nYXMabQZvPB", "spcti6GQVvinbtHU9UAkbXhjTcBJaba1NVx4tmK4M5F", - "superCMS6AucZe9aykaks7kUAj3oqB52yMMV81A8exa", "uEhHSnCXvWgtgvVaYscPHjG13G3peMmngQQ2ghC54i3", "WJguyKr593U9gDSdUu9Rcr1TMKo6pTse9LFZfCArR5K", "wpd8e4dzFVnCQY3jPuB9J7pKYQd6Mstz5tjWzF97Kxb", - "WUNoB9YQXmXXRcJsjY1G8PfVag5aAfnyGmFd6YwJVwp", - "XkCriyrNwS3G4rzAXtG5B1nnvb5Ka1JtCku93VqeKAr", + "Zb8KTJXwFvBTnbEDbyxWs8AVWjjipbXNFAfc1Jsbhey", "zeroT6PTAEjipvZuACTh1mbGCqTHgA6i1ped9DcuidX", ] ); diff --git a/stake-o-matic/src/validators_app.rs b/stake-o-matic/src/validators_app.rs new file mode 100644 index 0000000000..4d8a9b588c --- /dev/null +++ b/stake-o-matic/src/validators_app.rs @@ -0,0 +1,196 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +#[allow(dead_code)] +#[derive(Debug)] +pub enum ClusterJson { + MainnetBeta, + Testnet, +} + +impl Default for ClusterJson { + fn default() -> Self { + Self::MainnetBeta + } +} + +impl AsRef for ClusterJson { + fn as_ref(&self) -> &str { + match self { + Self::MainnetBeta => "mainnet.json", + Self::Testnet => "testnet.json", + } + } +} + +const DEFAULT_BASE_URL: &str = "https://www.validators.app/api/v1/"; +const TOKEN_HTTP_HEADER_NAME: &str = "Token"; + 
+#[derive(Debug)] +pub struct ClientConfig { + pub base_url: String, + pub cluster: ClusterJson, + pub api_token: String, +} + +impl Default for ClientConfig { + fn default() -> Self { + Self { + base_url: DEFAULT_BASE_URL.to_string(), + cluster: ClusterJson::default(), + api_token: String::default(), + } + } +} + +#[derive(Debug)] +enum Endpoint { + Ping, + Validators, +} + +impl Endpoint { + fn with_cluster(path: &str, cluster: &ClusterJson) -> String { + format!("{}/{}", path, cluster.as_ref()) + } + pub fn path(&self, cluster: &ClusterJson) -> String { + match self { + Self::Ping => "ping.json".to_string(), + Self::Validators => Self::with_cluster("validators", cluster), + } + } +} + +#[derive(Debug, Deserialize, Serialize)] +struct PingResponse { + answer: String, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ValidatorsResponseEntry { + pub account: Option, + pub active_stake: Option, + pub commission: Option, + pub created_at: Option, + pub data_center_concentration_score: Option, + pub data_center_host: Option, + pub data_center_key: Option, + pub delinquent: Option, + pub details: Option, + pub keybase_id: Option, + pub name: Option, + pub network: Option, + pub ping_time: Option, + pub published_information_score: Option, + pub root_distance_score: Option, + pub security_report_score: Option, + pub skipped_slot_percent: Option, + pub skipped_slot_score: Option, + pub skipped_slots: Option, + pub software_version: Option, + pub software_version_score: Option, + pub stake_concentration_score: Option, + pub total_score: Option, + pub updated_at: Option, + pub url: Option, + pub vote_account: Option, + pub vote_distance_score: Option, + pub www_url: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct ValidatorsResponse(Vec); + +impl AsRef> for ValidatorsResponse { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +#[allow(dead_code)] +#[derive(Debug, Clone, Copy)] +pub enum SortKind { + Score, + Name, + Stake, +} + +impl 
std::fmt::Display for SortKind { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Score => write!(f, "score"), + Self::Name => write!(f, "name"), + Self::Stake => write!(f, "stake"), + } + } +} + +pub type Limit = u32; + +pub struct Client { + base_url: reqwest::Url, + cluster: ClusterJson, + api_token: String, + client: reqwest::blocking::Client, +} + +impl Client { + pub fn new>(api_token: T) -> Self { + let config = ClientConfig { + api_token: api_token.as_ref().to_string(), + ..ClientConfig::default() + }; + Self::new_with_config(config) + } + + pub fn new_with_config(config: ClientConfig) -> Self { + let ClientConfig { + base_url, + cluster, + api_token, + } = config; + Self { + base_url: reqwest::Url::parse(&base_url).unwrap(), + cluster, + api_token, + client: reqwest::blocking::Client::new(), + } + } + + fn request( + &self, + endpoint: Endpoint, + query: &HashMap, + ) -> reqwest::Result { + let url = self.base_url.join(&endpoint.path(&self.cluster)).unwrap(); + let request = self + .client + .get(url) + .header(TOKEN_HTTP_HEADER_NAME, &self.api_token) + .query(&query) + .build()?; + self.client.execute(request) + } + + #[allow(dead_code)] + pub fn ping(&self) -> reqwest::Result<()> { + let response = self.request(Endpoint::Ping, &HashMap::new())?; + response.json::().map(|_| ()) + } + + pub fn validators( + &self, + sort: Option, + limit: Option, + ) -> reqwest::Result { + let mut query = HashMap::new(); + if let Some(sort) = sort { + query.insert("sort".into(), sort.to_string()); + } + if let Some(limit) = limit { + query.insert("limit".into(), limit.to_string()); + } + let response = self.request(Endpoint::Validators, &query)?; + response.json::() + } +} diff --git a/stake-o-matic/stake-o-matic.sh b/stake-o-matic/stake-o-matic.sh index 44fc3dd8be..2e68e0127b 100755 --- a/stake-o-matic/stake-o-matic.sh +++ b/stake-o-matic/stake-o-matic.sh @@ -2,6 +2,7 @@ # # Downloads and runs the latest stake-o-matic binary # +set 
-e solana_version=edge curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/velas-install-init.sh \ @@ -10,7 +11,9 @@ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v1.0.0/install/ve --data-dir ./solana-install \ --config ./solana-install/config.yml -export PATH="$PWD/solana-install/releases/$solana_version/solana-release/bin/:$PATH" +PATH="$(realpath "$PWD"/solana-install/releases/"$solana_version"*/solana-release/bin/):$PATH" +echo PATH="$PATH" set -x +solana --version exec solana-stake-o-matic "$@" diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index ec76335bda..86ac03cccc 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-storage-bigtable" -version = "1.5.19" +version = "1.6.14" description = "Solana Storage BigTable" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,28 +11,30 @@ edition = "2018" [dependencies] arc-swap = "0.4.8" -backoff = { version = "0.2.1", features = ["tokio"] } +backoff = { version = "0.3.0", features = ["tokio"] } bincode = "1.2.1" bzip2 = "0.3.3" enum-iterator = "0.6.0" flate2 = "1.0.14" -goauth = "0.8.1" +goauth = "0.9.0" log = "0.4.11" -prost = "0.6.1" -prost-types = "0.6.1" -serde = "1.0.118" +prost = "0.7.0" +prost-types = "0.7.0" +rand_core = "0.6.2" +serde = "1.0.122" serde_derive = "1.0.103" -smpl_jwt = "0.5.0" -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-storage-proto = { path = "../storage-proto", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -evm-state = { path = "../evm-utils/evm-state" } -evm-rpc = { path = "../evm-utils/evm-rpc" } +smpl_jwt = "0.6.0" +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-storage-proto = { path = "../storage-proto", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } thiserror = 
"1.0" futures = "0.3.8" -tonic = { version = "0.3.0", features = ["tls", "transport"] } +tonic = { version = "0.4.0", features = ["tls", "transport"] } zstd = "0.5.1" +evm-state = { path = "../evm-utils/evm-state" } +evm-rpc = { path = "../evm-utils/evm-rpc" } + [lib] crate-type = ["lib"] name = "solana_storage_bigtable" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index 9bf0169e87..26e50e30bb 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -1,10 +1,12 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "anyhow" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" +checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" [[package]] name = "autocfg" @@ -20,9 +22,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bytes" -version = "0.5.6" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cfg-if" @@ -44,9 +46,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", "libc", @@ -55,24 +57,24 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "heck" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "indexmap" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", "hashbrown", @@ -80,18 +82,18 @@ dependencies = [ [[package]] name = "itertools" -version = "0.8.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ "either", ] [[package]] name = "libc" -version = "0.2.89" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538c092e5586f4cdd7dd8078c4a79220e3e168880218124dcbce860f0ea938c6" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "log" @@ -126,18 +128,18 @@ checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] [[package]] name = "prost" -version = "0.6.1" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ "bytes", "prost-derive", @@ -145,9 +147,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ "bytes", "heck", @@ -163,9 +165,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", "itertools", @@ -176,9 +178,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ "bytes", "prost", @@ -186,7 +188,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.5.19" +version = "1.6.14" dependencies = [ "tonic-build", ] @@ -202,9 +204,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha", @@ -214,9 +216,9 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", @@ -224,27 +226,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ "rand_core", ] [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ "bitflags", ] @@ -260,9 +262,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.64" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2", "quote", @@ -285,9 +287,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d8d21cb568e802d77055ab7fcd43f0992206de5028de95c8d3a41118d32e8e" +checksum = "c695de27302f4697191dda1c7178131a8cb805463dda02864acb80fe1322fdcf" dependencies = [ "proc-macro2", 
"prost-build", @@ -297,15 +299,15 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "wasi" @@ -315,10 +317,11 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "which" -version = "3.1.1" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +checksum = "b55551e42cbdf2ce2bedd2203d0cc08dba002c27510f86dab6d0ce304cba3dfe" dependencies = [ + "either", "libc", ] diff --git a/storage-bigtable/build-proto/Cargo.toml b/storage-bigtable/build-proto/Cargo.toml index d9b9c071fe..f0214bc527 100644 --- a/storage-bigtable/build-proto/Cargo.toml +++ b/storage-bigtable/build-proto/Cargo.toml @@ -7,9 +7,9 @@ license = "Apache-2.0" name = "proto" publish = false repository = "https://github.com/solana-labs/solana" -version = "1.5.19" +version = "1.6.14" [workspace] [dependencies] -tonic-build = "0.2.0" +tonic-build = "0.4.0" diff --git a/storage-bigtable/proto/google.api.rs b/storage-bigtable/proto/google.api.rs index a9f7d00700..54fa00517b 100644 --- a/storage-bigtable/proto/google.api.rs +++ b/storage-bigtable/proto/google.api.rs @@ -7,7 +7,7 @@ pub struct Http { /// /// **NOTE:** All service configuration rules follow "last one wins" order. 
#[prost(message, repeated, tag = "1")] - pub rules: ::std::vec::Vec, + pub rules: ::prost::alloc::vec::Vec, /// When set to true, URL path parameters will be fully URI-decoded except in /// cases of single segment matches in reserved expansion, where "%2F" will be /// left encoded. @@ -292,7 +292,7 @@ pub struct HttpRule { /// /// Refer to [selector][google.api.DocumentationRule.selector] for syntax details. #[prost(string, tag = "1")] - pub selector: std::string::String, + pub selector: ::prost::alloc::string::String, /// The name of the request field whose value is mapped to the HTTP request /// body, or `*` for mapping all request fields not captured by the path /// pattern to the HTTP body, or omitted for not having any HTTP request body. @@ -300,7 +300,7 @@ pub struct HttpRule { /// NOTE: the referred field must be present at the top-level of the request /// message type. #[prost(string, tag = "7")] - pub body: std::string::String, + pub body: ::prost::alloc::string::String, /// Optional. The name of the response field whose value is mapped to the HTTP /// response body. When omitted, the entire response message will be used /// as the HTTP response body. @@ -308,18 +308,19 @@ pub struct HttpRule { /// NOTE: The referred field must be present at the top-level of the response /// message type. #[prost(string, tag = "12")] - pub response_body: std::string::String, + pub response_body: ::prost::alloc::string::String, /// Additional HTTP bindings for the selector. Nested bindings must /// not contain an `additional_bindings` field themselves (that is, /// the nesting may only be one level deep). #[prost(message, repeated, tag = "11")] - pub additional_bindings: ::std::vec::Vec, + pub additional_bindings: ::prost::alloc::vec::Vec, /// Determines the URL pattern is matched by this rules. This pattern can be /// used with any of the {get|put|post|delete|patch} methods. A custom method /// can be defined using the 'custom' field. 
#[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] - pub pattern: ::std::option::Option, + pub pattern: ::core::option::Option, } +/// Nested message and enum types in `HttpRule`. pub mod http_rule { /// Determines the URL pattern is matched by this rules. This pattern can be /// used with any of the {get|put|post|delete|patch} methods. A custom method @@ -329,19 +330,19 @@ pub mod http_rule { /// Maps to HTTP GET. Used for listing and getting information about /// resources. #[prost(string, tag = "2")] - Get(std::string::String), + Get(::prost::alloc::string::String), /// Maps to HTTP PUT. Used for replacing a resource. #[prost(string, tag = "3")] - Put(std::string::String), + Put(::prost::alloc::string::String), /// Maps to HTTP POST. Used for creating a resource or performing an action. #[prost(string, tag = "4")] - Post(std::string::String), + Post(::prost::alloc::string::String), /// Maps to HTTP DELETE. Used for deleting a resource. #[prost(string, tag = "5")] - Delete(std::string::String), + Delete(::prost::alloc::string::String), /// Maps to HTTP PATCH. Used for updating a resource. #[prost(string, tag = "6")] - Patch(std::string::String), + Patch(::prost::alloc::string::String), /// The custom pattern is used for specifying an HTTP method that is not /// included in the `pattern` field, such as HEAD, or "*" to leave the /// HTTP method unspecified for this rule. The wild-card rule is useful @@ -355,10 +356,10 @@ pub mod http_rule { pub struct CustomHttpPattern { /// The name of this custom HTTP verb. #[prost(string, tag = "1")] - pub kind: std::string::String, + pub kind: ::prost::alloc::string::String, /// The path matched by this custom verb. #[prost(string, tag = "2")] - pub path: std::string::String, + pub path: ::prost::alloc::string::String, } /// An indicator of the behavior of a given field (for example, that a field /// is required in requests, or given as output but ignored as input). 
@@ -514,7 +515,7 @@ pub struct ResourceDescriptor { /// should use PascalCase (UpperCamelCase). The maximum number of /// characters allowed for the `resource_type_kind` is 100. #[prost(string, tag = "1")] - pub r#type: std::string::String, + pub r#type: ::prost::alloc::string::String, /// Optional. The relative resource name pattern associated with this resource /// type. The DNS prefix of the full resource name shouldn't be specified here. /// @@ -535,11 +536,11 @@ pub struct ResourceDescriptor { /// the same component name (e.g. "project") refers to IDs of the same /// type of resource. #[prost(string, repeated, tag = "2")] - pub pattern: ::std::vec::Vec, + pub pattern: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Optional. The field on the resource that designates the resource name /// field. If omitted, this is assumed to be "name". #[prost(string, tag = "3")] - pub name_field: std::string::String, + pub name_field: ::prost::alloc::string::String, /// Optional. The historical or future-looking state of the resource pattern. /// /// Example: @@ -566,13 +567,19 @@ pub struct ResourceDescriptor { /// Note: The plural form is required even for singleton resources. See /// https://aip.dev/156 #[prost(string, tag = "5")] - pub plural: std::string::String, + pub plural: ::prost::alloc::string::String, /// The same concept of the `singular` field in k8s CRD spec /// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ /// Such as "project" for the `resourcemanager.googleapis.com/Project` type. #[prost(string, tag = "6")] - pub singular: std::string::String, + pub singular: ::prost::alloc::string::String, + /// Style flag(s) for this resource. + /// These indicate that a resource is expected to conform to a given + /// style. See the specific style flags for additional information. 
+ #[prost(enumeration = "resource_descriptor::Style", repeated, tag = "10")] + pub style: ::prost::alloc::vec::Vec, } +/// Nested message and enum types in `ResourceDescriptor`. pub mod resource_descriptor { /// A description of the historical or future-looking state of the /// resource pattern. @@ -589,6 +596,22 @@ pub mod resource_descriptor { /// that from being necessary once there are multiple patterns.) FutureMultiPattern = 2, } + /// A flag representing a specific style that a resource claims to conform to. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Style { + /// The unspecified value. Do not use. + Unspecified = 0, + /// This resource is intended to be "declarative-friendly". + /// + /// Declarative-friendly resources must be more strictly consistent, and + /// setting this to true communicates to tools that this resource should + /// adhere to declarative-friendly expectations. + /// + /// Note: This is used by the API linter (linter.aip.dev) to enable + /// additional checks. + DeclarativeFriendly = 1, + } } /// Defines a proto annotation that describes a string field that refers to /// an API resource. @@ -615,7 +638,7 @@ pub struct ResourceReference { /// }]; /// } #[prost(string, tag = "1")] - pub r#type: std::string::String, + pub r#type: ::prost::alloc::string::String, /// The resource type of a child collection that the annotated field /// references. This is useful for annotating the `parent` field that /// doesn't have a fixed resource type. 
@@ -628,5 +651,5 @@ pub struct ResourceReference { /// }; /// } #[prost(string, tag = "2")] - pub child_type: std::string::String, + pub child_type: ::prost::alloc::string::String, } diff --git a/storage-bigtable/proto/google.bigtable.v2.rs b/storage-bigtable/proto/google.bigtable.v2.rs index 7042d5ff5d..9936e2d1d4 100644 --- a/storage-bigtable/proto/google.bigtable.v2.rs +++ b/storage-bigtable/proto/google.bigtable.v2.rs @@ -5,12 +5,12 @@ pub struct Row { /// The unique key which identifies this row within its table. This is the same /// key that's used to identify the row in, for example, a MutateRowRequest. /// May contain any non-empty byte string up to 4KiB in length. - #[prost(bytes, tag = "1")] - pub key: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, /// May be empty, but only if the entire row is empty. /// The mutual ordering of column families is not specified. #[prost(message, repeated, tag = "2")] - pub families: ::std::vec::Vec, + pub families: ::prost::alloc::vec::Vec, } /// Specifies (some of) the contents of a single row/column family intersection /// of a table. @@ -23,10 +23,10 @@ pub struct Family { /// produce cells in a sentinel family with an empty name. /// Must be no greater than 64 characters in length. #[prost(string, tag = "1")] - pub name: std::string::String, + pub name: ::prost::alloc::string::String, /// Must not be empty. Sorted in order of increasing "qualifier". #[prost(message, repeated, tag = "2")] - pub columns: ::std::vec::Vec, + pub columns: ::prost::alloc::vec::Vec, } /// Specifies (some of) the contents of a single row/column intersection of a /// table. @@ -37,11 +37,11 @@ pub struct Column { /// which sets its `column_qualifier_regex_filter` field. /// May contain any byte string, including the empty string, up to 16kiB in /// length. 
- #[prost(bytes, tag = "1")] - pub qualifier: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub qualifier: ::prost::alloc::vec::Vec, /// Must not be empty. Sorted in order of decreasing "timestamp_micros". #[prost(message, repeated, tag = "2")] - pub cells: ::std::vec::Vec, + pub cells: ::prost::alloc::vec::Vec, } /// Specifies (some of) the contents of a single row/column/timestamp of a table. #[derive(Clone, PartialEq, ::prost::Message)] @@ -57,11 +57,11 @@ pub struct Cell { /// The value stored in the cell. /// May contain any byte string, including the empty string, up to 100MiB in /// length. - #[prost(bytes, tag = "2")] - pub value: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, /// Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. #[prost(string, repeated, tag = "3")] - pub labels: ::std::vec::Vec, + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Specifies a contiguous range of rows. #[derive(Clone, PartialEq, ::prost::Message)] @@ -69,12 +69,13 @@ pub struct RowRange { /// The row key at which to start the range. /// If neither field is set, interpreted as the empty string, inclusive. #[prost(oneof = "row_range::StartKey", tags = "1, 2")] - pub start_key: ::std::option::Option, + pub start_key: ::core::option::Option, /// The row key at which to end the range. /// If neither field is set, interpreted as the infinite row key, exclusive. #[prost(oneof = "row_range::EndKey", tags = "3, 4")] - pub end_key: ::std::option::Option, + pub end_key: ::core::option::Option, } +/// Nested message and enum types in `RowRange`. pub mod row_range { /// The row key at which to start the range. /// If neither field is set, interpreted as the empty string, inclusive. @@ -82,10 +83,10 @@ pub mod row_range { pub enum StartKey { /// Used when giving an inclusive lower bound for the range. 
#[prost(bytes, tag = "1")] - StartKeyClosed(std::vec::Vec), + StartKeyClosed(::prost::alloc::vec::Vec), /// Used when giving an exclusive lower bound for the range. #[prost(bytes, tag = "2")] - StartKeyOpen(std::vec::Vec), + StartKeyOpen(::prost::alloc::vec::Vec), } /// The row key at which to end the range. /// If neither field is set, interpreted as the infinite row key, exclusive. @@ -93,21 +94,21 @@ pub mod row_range { pub enum EndKey { /// Used when giving an exclusive upper bound for the range. #[prost(bytes, tag = "3")] - EndKeyOpen(std::vec::Vec), + EndKeyOpen(::prost::alloc::vec::Vec), /// Used when giving an inclusive upper bound for the range. #[prost(bytes, tag = "4")] - EndKeyClosed(std::vec::Vec), + EndKeyClosed(::prost::alloc::vec::Vec), } } /// Specifies a non-contiguous set of rows. #[derive(Clone, PartialEq, ::prost::Message)] pub struct RowSet { /// Single rows included in the set. - #[prost(bytes, repeated, tag = "1")] - pub row_keys: ::std::vec::Vec>, + #[prost(bytes = "vec", repeated, tag = "1")] + pub row_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, /// Contiguous row ranges included in the set. #[prost(message, repeated, tag = "2")] - pub row_ranges: ::std::vec::Vec, + pub row_ranges: ::prost::alloc::vec::Vec, } /// Specifies a contiguous range of columns within a single column family. /// The range spans from <column_family>:<start_qualifier> to @@ -117,16 +118,17 @@ pub struct RowSet { pub struct ColumnRange { /// The name of the column family within which this range falls. #[prost(string, tag = "1")] - pub family_name: std::string::String, + pub family_name: ::prost::alloc::string::String, /// The column qualifier at which to start the range (within `column_family`). /// If neither field is set, interpreted as the empty string, inclusive. 
#[prost(oneof = "column_range::StartQualifier", tags = "2, 3")] - pub start_qualifier: ::std::option::Option, + pub start_qualifier: ::core::option::Option, /// The column qualifier at which to end the range (within `column_family`). /// If neither field is set, interpreted as the infinite string, exclusive. #[prost(oneof = "column_range::EndQualifier", tags = "4, 5")] - pub end_qualifier: ::std::option::Option, + pub end_qualifier: ::core::option::Option, } +/// Nested message and enum types in `ColumnRange`. pub mod column_range { /// The column qualifier at which to start the range (within `column_family`). /// If neither field is set, interpreted as the empty string, inclusive. @@ -134,10 +136,10 @@ pub mod column_range { pub enum StartQualifier { /// Used when giving an inclusive lower bound for the range. #[prost(bytes, tag = "2")] - StartQualifierClosed(std::vec::Vec), + StartQualifierClosed(::prost::alloc::vec::Vec), /// Used when giving an exclusive lower bound for the range. #[prost(bytes, tag = "3")] - StartQualifierOpen(std::vec::Vec), + StartQualifierOpen(::prost::alloc::vec::Vec), } /// The column qualifier at which to end the range (within `column_family`). /// If neither field is set, interpreted as the infinite string, exclusive. @@ -145,10 +147,10 @@ pub mod column_range { pub enum EndQualifier { /// Used when giving an inclusive upper bound for the range. #[prost(bytes, tag = "4")] - EndQualifierClosed(std::vec::Vec), + EndQualifierClosed(::prost::alloc::vec::Vec), /// Used when giving an exclusive upper bound for the range. #[prost(bytes, tag = "5")] - EndQualifierOpen(std::vec::Vec), + EndQualifierOpen(::prost::alloc::vec::Vec), } } /// Specified a contiguous range of microsecond timestamps. @@ -167,12 +169,13 @@ pub struct ValueRange { /// The value at which to start the range. /// If neither field is set, interpreted as the empty string, inclusive. 
#[prost(oneof = "value_range::StartValue", tags = "1, 2")] - pub start_value: ::std::option::Option, + pub start_value: ::core::option::Option, /// The value at which to end the range. /// If neither field is set, interpreted as the infinite string, exclusive. #[prost(oneof = "value_range::EndValue", tags = "3, 4")] - pub end_value: ::std::option::Option, + pub end_value: ::core::option::Option, } +/// Nested message and enum types in `ValueRange`. pub mod value_range { /// The value at which to start the range. /// If neither field is set, interpreted as the empty string, inclusive. @@ -180,10 +183,10 @@ pub mod value_range { pub enum StartValue { /// Used when giving an inclusive lower bound for the range. #[prost(bytes, tag = "1")] - StartValueClosed(std::vec::Vec), + StartValueClosed(::prost::alloc::vec::Vec), /// Used when giving an exclusive lower bound for the range. #[prost(bytes, tag = "2")] - StartValueOpen(std::vec::Vec), + StartValueOpen(::prost::alloc::vec::Vec), } /// The value at which to end the range. /// If neither field is set, interpreted as the infinite string, exclusive. @@ -191,10 +194,10 @@ pub mod value_range { pub enum EndValue { /// Used when giving an inclusive upper bound for the range. #[prost(bytes, tag = "3")] - EndValueClosed(std::vec::Vec), + EndValueClosed(::prost::alloc::vec::Vec), /// Used when giving an exclusive upper bound for the range. #[prost(bytes, tag = "4")] - EndValueOpen(std::vec::Vec), + EndValueOpen(::prost::alloc::vec::Vec), } } /// Takes a row as input and produces an alternate view of the row based on @@ -238,8 +241,9 @@ pub struct RowFilter { oneof = "row_filter::Filter", tags = "1, 2, 3, 16, 17, 18, 4, 14, 5, 6, 7, 8, 9, 15, 10, 11, 12, 13, 19" )] - pub filter: ::std::option::Option, + pub filter: ::core::option::Option, } +/// Nested message and enum types in `RowFilter`. pub mod row_filter { /// A RowFilter which sends rows through several RowFilters in sequence. 
#[derive(Clone, PartialEq, ::prost::Message)] @@ -248,7 +252,7 @@ pub mod row_filter { /// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row /// The full chain is executed atomically. #[prost(message, repeated, tag = "1")] - pub filters: ::std::vec::Vec, + pub filters: ::prost::alloc::vec::Vec, } /// A RowFilter which sends each row to each of several component /// RowFilters and interleaves the results. @@ -280,7 +284,7 @@ pub mod row_filter { /// /// All interleaved filters are executed atomically. #[prost(message, repeated, tag = "1")] - pub filters: ::std::vec::Vec, + pub filters: ::prost::alloc::vec::Vec, } /// A RowFilter which evaluates one of two possible RowFilters, depending on /// whether or not a predicate RowFilter outputs any cells from the input row. @@ -294,16 +298,16 @@ pub mod row_filter { /// If `predicate_filter` outputs any cells, then `true_filter` will be /// evaluated on the input row. Otherwise, `false_filter` will be evaluated. #[prost(message, optional, boxed, tag = "1")] - pub predicate_filter: ::std::option::Option<::std::boxed::Box>, + pub predicate_filter: ::core::option::Option<::prost::alloc::boxed::Box>, /// The filter to apply to the input row if `predicate_filter` returns any /// results. If not provided, no results will be returned in the true case. #[prost(message, optional, boxed, tag = "2")] - pub true_filter: ::std::option::Option<::std::boxed::Box>, + pub true_filter: ::core::option::Option<::prost::alloc::boxed::Box>, /// The filter to apply to the input row if `predicate_filter` does not /// return any results. If not provided, no results will be returned in the /// false case. #[prost(message, optional, boxed, tag = "3")] - pub false_filter: ::std::option::Option<::std::boxed::Box>, + pub false_filter: ::core::option::Option<::prost::alloc::boxed::Box>, } /// Which of the possible RowFilter types to apply. If none are set, this /// RowFilter returns all cells in the input row. 
@@ -320,7 +324,7 @@ pub mod row_filter { /// Applies one of two possible RowFilters to the data based on the output of /// a predicate RowFilter. #[prost(message, tag = "3")] - Condition(Box), + Condition(::prost::alloc::boxed::Box), /// ADVANCED USE ONLY. /// Hook for introspection into the RowFilter. Outputs all cells directly to /// the output of the read rather than to any parent filter. Consider the @@ -398,7 +402,7 @@ pub mod row_filter { /// will not match the new line character `\n`, which may be present in a /// binary key. #[prost(bytes, tag = "4")] - RowKeyRegexFilter(std::vec::Vec), + RowKeyRegexFilter(::prost::alloc::vec::Vec), /// Matches all cells from a row with probability p, and matches no cells /// from the row with probability 1-p. #[prost(double, tag = "14")] @@ -410,7 +414,7 @@ pub mod row_filter { /// `\n`, it is sufficient to use `.` as a full wildcard when matching /// column family names. #[prost(string, tag = "5")] - FamilyNameRegexFilter(std::string::String), + FamilyNameRegexFilter(::prost::alloc::string::String), /// Matches only cells from columns whose qualifiers satisfy the given RE2 /// regex. /// Note that, since column qualifiers can contain arbitrary bytes, the `\C` @@ -418,7 +422,7 @@ pub mod row_filter { /// character will not match the new line character `\n`, which may be /// present in a binary qualifier. #[prost(bytes, tag = "6")] - ColumnQualifierRegexFilter(std::vec::Vec), + ColumnQualifierRegexFilter(::prost::alloc::vec::Vec), /// Matches only cells from columns within the given range. #[prost(message, tag = "7")] ColumnRangeFilter(super::ColumnRange), @@ -431,7 +435,7 @@ pub mod row_filter { /// will not match the new line character `\n`, which may be present in a /// binary value. #[prost(bytes, tag = "9")] - ValueRegexFilter(std::vec::Vec), + ValueRegexFilter(::prost::alloc::vec::Vec), /// Matches only cells with values that fall within the given range. 
#[prost(message, tag = "15")] ValueRangeFilter(super::ValueRange), @@ -470,7 +474,7 @@ pub mod row_filter { /// will be applied to separate copies of the input. This may be relaxed in /// the future. #[prost(string, tag = "19")] - ApplyLabelTransformer(std::string::String), + ApplyLabelTransformer(::prost::alloc::string::String), } } /// Specifies a particular change to be made to the contents of a row. @@ -478,8 +482,9 @@ pub mod row_filter { pub struct Mutation { /// Which of the possible Mutation types to apply. #[prost(oneof = "mutation::Mutation", tags = "1, 2, 3, 4")] - pub mutation: ::std::option::Option, + pub mutation: ::core::option::Option, } +/// Nested message and enum types in `Mutation`. pub mod mutation { /// A Mutation which sets the value of the specified cell. #[derive(Clone, PartialEq, ::prost::Message)] @@ -487,11 +492,11 @@ pub mod mutation { /// The name of the family into which new data should be written. /// Must match `[-_.a-zA-Z0-9]+` #[prost(string, tag = "1")] - pub family_name: std::string::String, + pub family_name: ::prost::alloc::string::String, /// The qualifier of the column into which new data should be written. /// Can be any byte string, including the empty string. - #[prost(bytes, tag = "2")] - pub column_qualifier: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, /// The timestamp of the cell into which new data should be written. /// Use -1 for current Bigtable server time. /// Otherwise, the client should set this value itself, noting that the @@ -500,8 +505,8 @@ pub mod mutation { #[prost(int64, tag = "3")] pub timestamp_micros: i64, /// The value to be written into the specified cell. - #[prost(bytes, tag = "4")] - pub value: std::vec::Vec, + #[prost(bytes = "vec", tag = "4")] + pub value: ::prost::alloc::vec::Vec, } /// A Mutation which deletes cells from the specified column, optionally /// restricting the deletions to a given timestamp range. 
@@ -510,14 +515,14 @@ pub mod mutation { /// The name of the family from which cells should be deleted. /// Must match `[-_.a-zA-Z0-9]+` #[prost(string, tag = "1")] - pub family_name: std::string::String, + pub family_name: ::prost::alloc::string::String, /// The qualifier of the column from which cells should be deleted. /// Can be any byte string, including the empty string. - #[prost(bytes, tag = "2")] - pub column_qualifier: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, /// The range of timestamps within which cells should be deleted. #[prost(message, optional, tag = "3")] - pub time_range: ::std::option::Option, + pub time_range: ::core::option::Option, } /// A Mutation which deletes all cells from the specified column family. #[derive(Clone, PartialEq, ::prost::Message)] @@ -525,7 +530,7 @@ pub mod mutation { /// The name of the family from which cells should be deleted. /// Must match `[-_.a-zA-Z0-9]+` #[prost(string, tag = "1")] - pub family_name: std::string::String, + pub family_name: ::prost::alloc::string::String, } /// A Mutation which deletes all cells from the containing row. #[derive(Clone, PartialEq, ::prost::Message)] @@ -554,17 +559,18 @@ pub struct ReadModifyWriteRule { /// The name of the family to which the read/modify/write should be applied. /// Must match `[-_.a-zA-Z0-9]+` #[prost(string, tag = "1")] - pub family_name: std::string::String, + pub family_name: ::prost::alloc::string::String, /// The qualifier of the column to which the read/modify/write should be /// applied. /// Can be any byte string, including the empty string. - #[prost(bytes, tag = "2")] - pub column_qualifier: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, /// The rule used to determine the column's new latest value from its current /// latest value. 
#[prost(oneof = "read_modify_write_rule::Rule", tags = "3, 4")] - pub rule: ::std::option::Option, + pub rule: ::core::option::Option, } +/// Nested message and enum types in `ReadModifyWriteRule`. pub mod read_modify_write_rule { /// The rule used to determine the column's new latest value from its current /// latest value. @@ -574,7 +580,7 @@ pub mod read_modify_write_rule { /// If the targeted cell is unset, it will be treated as containing the /// empty string. #[prost(bytes, tag = "3")] - AppendValue(std::vec::Vec), + AppendValue(::prost::alloc::vec::Vec), /// Rule specifying that `increment_amount` be added to the existing value. /// If the targeted cell is unset, it will be treated as containing a zero. /// Otherwise, the targeted cell must contain an 8-byte value (interpreted @@ -590,18 +596,18 @@ pub struct ReadRowsRequest { /// Values are of the form /// `projects//instances//tables/`. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "5")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, /// The row keys and/or ranges to read. If not specified, reads from all rows. #[prost(message, optional, tag = "2")] - pub rows: ::std::option::Option, + pub rows: ::core::option::Option, /// The filter to apply to the contents of the specified row(s). If unset, /// reads the entirety of each row. #[prost(message, optional, tag = "3")] - pub filter: ::std::option::Option, + pub filter: ::core::option::Option, /// The read will terminate after committing to N rows' worth of results. The /// default (zero) is to return all results. #[prost(int64, tag = "4")] @@ -612,7 +618,7 @@ pub struct ReadRowsRequest { pub struct ReadRowsResponse { /// A collection of a row's contents as part of the read request. 
#[prost(message, repeated, tag = "1")] - pub chunks: ::std::vec::Vec, + pub chunks: ::prost::alloc::vec::Vec, /// Optionally the server might return the row key of the last row it /// has scanned. The client can use this to construct a more /// efficient retry request if needed: any row keys or portions of @@ -620,9 +626,10 @@ pub struct ReadRowsResponse { /// This is primarily useful for cases where the server has read a /// lot of data that was filtered out since the last committed row /// key, allowing the client to skip that work on a retry. - #[prost(bytes, tag = "2")] - pub last_scanned_row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub last_scanned_row_key: ::prost::alloc::vec::Vec, } +/// Nested message and enum types in `ReadRowsResponse`. pub mod read_rows_response { /// Specifies a piece of a row's contents returned as part of the read /// response stream. @@ -632,8 +639,8 @@ pub mod read_rows_response { /// this CellChunk is a continuation of the same row as the previous /// CellChunk in the response stream, even if that CellChunk was in a /// previous ReadRowsResponse message. - #[prost(bytes, tag = "1")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, /// The column family name for this chunk of data. If this message /// is not present this CellChunk is a continuation of the same column /// family as the previous CellChunk. The empty string can occur as a @@ -641,14 +648,14 @@ pub mod read_rows_response { /// explicitly for the presence of this message, not just for /// `family_name.value` being non-empty. #[prost(message, optional, tag = "2")] - pub family_name: ::std::option::Option<::std::string::String>, + pub family_name: ::core::option::Option<::prost::alloc::string::String>, /// The column qualifier for this chunk of data. If this message /// is not present, this CellChunk is a continuation of the same column /// as the previous CellChunk. 
Column qualifiers may be empty so /// clients must check for the presence of this message, not just /// for `qualifier.value` being non-empty. #[prost(message, optional, tag = "3")] - pub qualifier: ::std::option::Option<::std::vec::Vec>, + pub qualifier: ::core::option::Option<::prost::alloc::vec::Vec>, /// The cell's stored timestamp, which also uniquely identifies it /// within its column. Values are always expressed in /// microseconds, but individual tables may set a coarser @@ -663,14 +670,14 @@ pub mod read_rows_response { /// [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set /// on the first CellChunk per cell. #[prost(string, repeated, tag = "5")] - pub labels: ::std::vec::Vec, + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// The value stored in the cell. Cell values can be split across /// multiple CellChunks. In that case only the value field will be /// set in CellChunks after the first: the timestamp and labels /// will only be present in the first CellChunk, even if the first /// CellChunk came in a previous ReadRowsResponse. - #[prost(bytes, tag = "6")] - pub value: std::vec::Vec, + #[prost(bytes = "vec", tag = "6")] + pub value: ::prost::alloc::vec::Vec, /// If this CellChunk is part of a chunked cell value and this is /// not the final chunk of that cell, value_size will be set to the /// total length of the cell value. The client can use this size @@ -679,8 +686,9 @@ pub mod read_rows_response { pub value_size: i32, /// Signals to the client concerning previous CellChunks received. #[prost(oneof = "cell_chunk::RowStatus", tags = "8, 9")] - pub row_status: ::std::option::Option, + pub row_status: ::core::option::Option, } + /// Nested message and enum types in `CellChunk`. pub mod cell_chunk { /// Signals to the client concerning previous CellChunks received. 
#[derive(Clone, PartialEq, ::prost::Oneof)] @@ -703,11 +711,11 @@ pub struct SampleRowKeysRequest { /// Values are of the form /// `projects//instances//tables/
`. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "2")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, } /// Response message for Bigtable.SampleRowKeys. #[derive(Clone, PartialEq, ::prost::Message)] @@ -719,8 +727,8 @@ pub struct SampleRowKeysResponse { /// Note that row keys in this list may not have ever been written to or read /// from, and users should therefore not make any assumptions about the row key /// structure that are specific to their use case. - #[prost(bytes, tag = "1")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, /// Approximate total storage space used by all rows in the table which precede /// `row_key`. Buffering the contents of all rows between two subsequent /// samples would require space roughly equal to the difference in their @@ -735,19 +743,19 @@ pub struct MutateRowRequest { /// Values are of the form /// `projects//instances//tables/
`. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "4")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, /// Required. The key of the row to which the mutation should be applied. - #[prost(bytes, tag = "2")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, /// Required. Changes to be atomically applied to the specified row. Entries are applied /// in order, meaning that earlier mutations can be masked by later ones. /// Must contain at least one entry and at most 100000. #[prost(message, repeated, tag = "3")] - pub mutations: ::std::vec::Vec, + pub mutations: ::prost::alloc::vec::Vec, } /// Response message for Bigtable.MutateRow. #[derive(Clone, PartialEq, ::prost::Message)] @@ -757,32 +765,33 @@ pub struct MutateRowResponse {} pub struct MutateRowsRequest { /// Required. The unique name of the table to which the mutations should be applied. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "3")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, /// Required. The row keys and corresponding mutations to be applied in bulk. /// Each entry is applied as an atomic mutation, but the entries may be /// applied in arbitrary order (even between entries for the same row). /// At least one entry must be specified, and in total the entries can /// contain at most 100000 mutations. 
#[prost(message, repeated, tag = "2")] - pub entries: ::std::vec::Vec, + pub entries: ::prost::alloc::vec::Vec, } +/// Nested message and enum types in `MutateRowsRequest`. pub mod mutate_rows_request { /// A mutation for a given row. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Entry { /// The key of the row to which the `mutations` should be applied. - #[prost(bytes, tag = "1")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, /// Required. Changes to be atomically applied to the specified row. Mutations are /// applied in order, meaning that earlier mutations can be masked by /// later ones. /// You must specify at least one mutation. #[prost(message, repeated, tag = "2")] - pub mutations: ::std::vec::Vec, + pub mutations: ::prost::alloc::vec::Vec, } } /// Response message for BigtableService.MutateRows. @@ -790,8 +799,9 @@ pub mod mutate_rows_request { pub struct MutateRowsResponse { /// One or more results for Entries from the batch request. #[prost(message, repeated, tag = "1")] - pub entries: ::std::vec::Vec, + pub entries: ::prost::alloc::vec::Vec, } +/// Nested message and enum types in `MutateRowsResponse`. pub mod mutate_rows_response { /// The result of applying a passed mutation in the original request. #[derive(Clone, PartialEq, ::prost::Message)] @@ -805,7 +815,7 @@ pub mod mutate_rows_response { /// for one Entry to fail due to an error with another Entry. In the event /// that this occurs, the same error will be reported for both entries. #[prost(message, optional, tag = "2")] - pub status: ::std::option::Option, + pub status: ::core::option::Option, } } /// Request message for Bigtable.CheckAndMutateRow. @@ -816,34 +826,34 @@ pub struct CheckAndMutateRowRequest { /// Values are of the form /// `projects//instances//tables/
`. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "7")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, /// Required. The key of the row to which the conditional mutation should be applied. - #[prost(bytes, tag = "2")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, /// The filter to be applied to the contents of the specified row. Depending /// on whether or not any results are yielded, either `true_mutations` or /// `false_mutations` will be executed. If unset, checks that the row contains /// any values at all. #[prost(message, optional, tag = "6")] - pub predicate_filter: ::std::option::Option, + pub predicate_filter: ::core::option::Option, /// Changes to be atomically applied to the specified row if `predicate_filter` /// yields at least one cell when applied to `row_key`. Entries are applied in /// order, meaning that earlier mutations can be masked by later ones. /// Must contain at least one entry if `false_mutations` is empty, and at most /// 100000. #[prost(message, repeated, tag = "4")] - pub true_mutations: ::std::vec::Vec, + pub true_mutations: ::prost::alloc::vec::Vec, /// Changes to be atomically applied to the specified row if `predicate_filter` /// does not yield any cells when applied to `row_key`. Entries are applied in /// order, meaning that earlier mutations can be masked by later ones. /// Must contain at least one entry if `true_mutations` is empty, and at most /// 100000. #[prost(message, repeated, tag = "5")] - pub false_mutations: ::std::vec::Vec, + pub false_mutations: ::prost::alloc::vec::Vec, } /// Response message for Bigtable.CheckAndMutateRow. 
#[derive(Clone, PartialEq, ::prost::Message)] @@ -861,26 +871,26 @@ pub struct ReadModifyWriteRowRequest { /// Values are of the form /// `projects//instances//tables/
`. #[prost(string, tag = "1")] - pub table_name: std::string::String, + pub table_name: ::prost::alloc::string::String, /// This value specifies routing for replication. If not specified, the /// "default" application profile will be used. #[prost(string, tag = "4")] - pub app_profile_id: std::string::String, + pub app_profile_id: ::prost::alloc::string::String, /// Required. The key of the row to which the read/modify/write rules should be applied. - #[prost(bytes, tag = "2")] - pub row_key: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, /// Required. Rules specifying how the specified row's contents are to be transformed /// into writes. Entries are applied in order, meaning that earlier rules will /// affect the results of later ones. #[prost(message, repeated, tag = "3")] - pub rules: ::std::vec::Vec, + pub rules: ::prost::alloc::vec::Vec, } /// Response message for Bigtable.ReadModifyWriteRow. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReadModifyWriteRowResponse { /// A Row containing the new contents of all cells modified by the request. #[prost(message, optional, tag = "1")] - pub row: ::std::option::Option, + pub row: ::core::option::Option, } #[doc = r" Generated client implementations."] pub mod bigtable_client { diff --git a/storage-bigtable/proto/google.rpc.rs b/storage-bigtable/proto/google.rpc.rs index 37900a4d58..807097efe9 100644 --- a/storage-bigtable/proto/google.rpc.rs +++ b/storage-bigtable/proto/google.rpc.rs @@ -14,9 +14,9 @@ pub struct Status { /// user-facing error message should be localized and sent in the /// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. #[prost(string, tag = "2")] - pub message: std::string::String, + pub message: ::prost::alloc::string::String, /// A list of messages that carry the error details. There is a common set of /// message types for APIs to use. 
#[prost(message, repeated, tag = "3")] - pub details: ::std::vec::Vec<::prost_types::Any>, + pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, } diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index bc248ba6b7..964b4f60a0 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -214,12 +214,11 @@ impl BigTableConnection { where T: serde::ser::Serialize, { - use backoff::{future::FutureOperation as _, ExponentialBackoff}; - (|| async { + use backoff::{future::retry, ExponentialBackoff}; + retry(ExponentialBackoff::default(), || async { let mut client = self.client(); Ok(client.put_bincode_cells(table, cells).await?) }) - .retry(ExponentialBackoff::default()) .await } @@ -231,12 +230,11 @@ impl BigTableConnection { where T: prost::Message, { - use backoff::{future::FutureOperation as _, ExponentialBackoff}; - (|| async { + use backoff::{future::retry, ExponentialBackoff}; + retry(ExponentialBackoff::default(), || async { let mut client = self.client(); Ok(client.put_protobuf_cells(table, cells).await?) 
}) - .retry(ExponentialBackoff::default()) .await } } @@ -686,6 +684,7 @@ mod tests { log_messages: Some(vec![]), pre_token_balances: Some(vec![]), post_token_balances: Some(vec![]), + rewards: Some(vec![]), }), }; let block = ConfirmedBlock { @@ -695,6 +694,7 @@ mod tests { previous_blockhash: Hash::default().to_string(), rewards: vec![], block_time: Some(1_234_567_890), + block_height: Some(1), }; let bincode_block = compress_best( &bincode::serialize::(&block.clone().into()).unwrap(), @@ -737,6 +737,7 @@ mod tests { meta.log_messages = None; // Legacy bincode implementation does not support log_messages meta.pre_token_balances = None; // Legacy bincode implementation does not support token balances meta.post_token_balances = None; // Legacy bincode implementation does not support token balances + meta.rewards = None; // Legacy bincode implementation does not support rewards } assert_eq!(block, bincode_block.into()); } else { diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index 5bc8b0ad15..5d78c4b5ee 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -3,6 +3,7 @@ use log::*; use serde::{Deserialize, Serialize}; use solana_sdk::{ clock::{Slot, UnixTimestamp}, + deserialize_utils::default_on_eof, pubkey::Pubkey, signature::Signature, sysvar::is_sysvar_id, @@ -82,6 +83,9 @@ fn key_to_slot(key: &str) -> Option { // StoredConfirmedBlock holds the same contents as ConfirmedBlock, but is slightly compressed and avoids // some serde JSON directives that cause issues with bincode // +// Note: in order to continue to support old bincode-serialized bigtable entries, if new fields are +// added to ConfirmedBlock, they must either be excluded or set to `default_on_eof` here +// #[derive(Serialize, Deserialize)] struct StoredConfirmedBlock { previous_blockhash: String, @@ -90,6 +94,8 @@ struct StoredConfirmedBlock { transactions: Vec, rewards: StoredConfirmedBlockRewards, block_time: Option, + #[serde(deserialize_with = 
"default_on_eof")] + block_height: Option, } impl From for StoredConfirmedBlock { @@ -101,6 +107,7 @@ impl From for StoredConfirmedBlock { transactions, rewards, block_time, + block_height, } = confirmed_block; Self { @@ -110,6 +117,7 @@ impl From for StoredConfirmedBlock { transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|reward| reward.into()).collect(), block_time, + block_height, } } } @@ -123,6 +131,7 @@ impl From for ConfirmedBlock { transactions, rewards, block_time, + block_height, } = confirmed_block; Self { @@ -132,6 +141,7 @@ impl From for ConfirmedBlock { transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|reward| reward.into()).collect(), block_time, + block_height, } } } @@ -189,6 +199,7 @@ impl From for TransactionStatusMeta { log_messages: None, pre_token_balances: None, post_token_balances: None, + rewards: None, } } } diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index 328441abf9..16d8a6ef4b 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-storage-proto" -version = "1.5.19" +version = "1.6.14" description = "Solana Storage Protobuf Definitions" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,14 +12,15 @@ edition = "2018" [dependencies] bincode = "1.2.1" bs58 = "0.3.1" -prost = "0.6.1" -serde = "1.0.118" +prost = "0.7.0" +serde = "1.0.122" serde_derive = "1.0.103" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -evm-state = { path = "../evm-utils/evm-state" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", 
version = "=1.6.14" } + rlp = "0.5.0" +evm-state = { path = "../evm-utils/evm-state" } [lib] crate-type = ["lib"] diff --git a/storage-proto/build-proto/Cargo.lock b/storage-proto/build-proto/Cargo.lock index 9bf0169e87..26e50e30bb 100644 --- a/storage-proto/build-proto/Cargo.lock +++ b/storage-proto/build-proto/Cargo.lock @@ -1,10 +1,12 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "anyhow" -version = "1.0.38" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" +checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" [[package]] name = "autocfg" @@ -20,9 +22,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bytes" -version = "0.5.6" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" [[package]] name = "cfg-if" @@ -44,9 +46,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", "libc", @@ -55,24 +57,24 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "heck" -version = "0.3.2" +version = "0.3.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "indexmap" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" dependencies = [ "autocfg", "hashbrown", @@ -80,18 +82,18 @@ dependencies = [ [[package]] name = "itertools" -version = "0.8.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ "either", ] [[package]] name = "libc" -version = "0.2.89" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538c092e5586f4cdd7dd8078c4a79220e3e168880218124dcbce860f0ea938c6" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "log" @@ -126,18 +128,18 @@ checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] [[package]] name = "prost" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" 
dependencies = [ "bytes", "prost-derive", @@ -145,9 +147,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" +checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ "bytes", "heck", @@ -163,9 +165,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", "itertools", @@ -176,9 +178,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" +checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ "bytes", "prost", @@ -186,7 +188,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.5.19" +version = "1.6.14" dependencies = [ "tonic-build", ] @@ -202,9 +204,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha", @@ -214,9 +216,9 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ 
"ppv-lite86", "rand_core", @@ -224,27 +226,27 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ "rand_core", ] [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ "bitflags", ] @@ -260,9 +262,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.64" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2", "quote", @@ -285,9 +287,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d8d21cb568e802d77055ab7fcd43f0992206de5028de95c8d3a41118d32e8e" +checksum = "c695de27302f4697191dda1c7178131a8cb805463dda02864acb80fe1322fdcf" dependencies = [ "proc-macro2", "prost-build", @@ -297,15 +299,15 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "wasi" @@ -315,10 +317,11 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "which" -version = "3.1.1" +version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +checksum = "b55551e42cbdf2ce2bedd2203d0cc08dba002c27510f86dab6d0ce304cba3dfe" dependencies = [ + "either", "libc", ] diff --git a/storage-proto/build-proto/Cargo.toml b/storage-proto/build-proto/Cargo.toml index d9b9c071fe..f0214bc527 100644 --- a/storage-proto/build-proto/Cargo.toml +++ b/storage-proto/build-proto/Cargo.toml @@ -7,9 +7,9 @@ license = "Apache-2.0" name = "proto" publish = false repository = "https://github.com/solana-labs/solana" -version = "1.5.19" +version = "1.6.14" [workspace] [dependencies] -tonic-build = "0.2.0" +tonic-build = "0.4.0" diff --git a/storage-proto/proto/solana.storage.confirmed_block.rs b/storage-proto/proto/solana.storage.confirmed_block.rs index 97bd92dfe4..d2253b04ed 100644 --- a/storage-proto/proto/solana.storage.confirmed_block.rs +++ b/storage-proto/proto/solana.storage.confirmed_block.rs @@ -1,42 +1,44 @@ #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConfirmedBlock { #[prost(string, tag = "1")] - pub previous_blockhash: std::string::String, + pub previous_blockhash: ::prost::alloc::string::String, #[prost(string, tag = "2")] - pub blockhash: std::string::String, + pub blockhash: ::prost::alloc::string::String, #[prost(uint64, tag = "3")] pub 
parent_slot: u64, #[prost(message, repeated, tag = "4")] - pub transactions: ::std::vec::Vec, + pub transactions: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "5")] - pub rewards: ::std::vec::Vec, + pub rewards: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "6")] - pub block_time: ::std::option::Option, + pub block_time: ::core::option::Option, + #[prost(message, optional, tag = "7")] + pub block_height: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConfirmedTransaction { #[prost(message, optional, tag = "1")] - pub transaction: ::std::option::Option, + pub transaction: ::core::option::Option, #[prost(message, optional, tag = "2")] - pub meta: ::std::option::Option, + pub meta: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Transaction { - #[prost(bytes, repeated, tag = "1")] - pub signatures: ::std::vec::Vec>, + #[prost(bytes = "vec", repeated, tag = "1")] + pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, #[prost(message, optional, tag = "2")] - pub message: ::std::option::Option, + pub message: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Message { #[prost(message, optional, tag = "1")] - pub header: ::std::option::Option, - #[prost(bytes, repeated, tag = "2")] - pub account_keys: ::std::vec::Vec>, - #[prost(bytes, tag = "3")] - pub recent_blockhash: std::vec::Vec, + pub header: ::core::option::Option, + #[prost(bytes = "vec", repeated, tag = "2")] + pub account_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(bytes = "vec", tag = "3")] + pub recent_blockhash: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "4")] - pub instructions: ::std::vec::Vec, + pub instructions: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageHeader { @@ -50,51 +52,53 @@ pub struct MessageHeader { #[derive(Clone, PartialEq, ::prost::Message)] pub struct 
TransactionStatusMeta { #[prost(message, optional, tag = "1")] - pub err: ::std::option::Option, + pub err: ::core::option::Option, #[prost(uint64, tag = "2")] pub fee: u64, #[prost(uint64, repeated, tag = "3")] - pub pre_balances: ::std::vec::Vec, + pub pre_balances: ::prost::alloc::vec::Vec, #[prost(uint64, repeated, tag = "4")] - pub post_balances: ::std::vec::Vec, + pub post_balances: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "5")] - pub inner_instructions: ::std::vec::Vec, + pub inner_instructions: ::prost::alloc::vec::Vec, #[prost(string, repeated, tag = "6")] - pub log_messages: ::std::vec::Vec, + pub log_messages: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, #[prost(message, repeated, tag = "7")] - pub pre_token_balances: ::std::vec::Vec, + pub pre_token_balances: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "8")] - pub post_token_balances: ::std::vec::Vec, + pub post_token_balances: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "9")] + pub rewards: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionError { - #[prost(bytes, tag = "1")] - pub err: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub err: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct InnerInstructions { #[prost(uint32, tag = "1")] pub index: u32, #[prost(message, repeated, tag = "2")] - pub instructions: ::std::vec::Vec, + pub instructions: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompiledInstruction { #[prost(uint32, tag = "1")] pub program_id_index: u32, - #[prost(bytes, tag = "2")] - pub accounts: std::vec::Vec, - #[prost(bytes, tag = "3")] - pub data: std::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub accounts: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub data: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct TokenBalance { 
#[prost(uint32, tag = "1")] pub account_index: u32, #[prost(string, tag = "2")] - pub mint: std::string::String, + pub mint: ::prost::alloc::string::String, #[prost(message, optional, tag = "3")] - pub ui_token_amount: ::std::option::Option, + pub ui_token_amount: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct UiTokenAmount { @@ -103,14 +107,14 @@ pub struct UiTokenAmount { #[prost(uint32, tag = "2")] pub decimals: u32, #[prost(string, tag = "3")] - pub amount: std::string::String, + pub amount: ::prost::alloc::string::String, #[prost(string, tag = "4")] - pub ui_amount_string: std::string::String, + pub ui_amount_string: ::prost::alloc::string::String, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Reward { #[prost(string, tag = "1")] - pub pubkey: std::string::String, + pub pubkey: ::prost::alloc::string::String, #[prost(int64, tag = "2")] pub lamports: i64, #[prost(uint64, tag = "3")] @@ -121,13 +125,18 @@ pub struct Reward { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Rewards { #[prost(message, repeated, tag = "1")] - pub rewards: ::std::vec::Vec, + pub rewards: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnixTimestamp { #[prost(int64, tag = "1")] pub timestamp: i64, } +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockHeight { + #[prost(uint64, tag = "1")] + pub block_height: u64, +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum RewardType { diff --git a/storage-proto/proto/solana.storage.transaction_by_addr.rs b/storage-proto/proto/solana.storage.transaction_by_addr.rs index 77d1d39d28..5425eaab4e 100644 --- a/storage-proto/proto/solana.storage.transaction_by_addr.rs +++ b/storage-proto/proto/solana.storage.transaction_by_addr.rs @@ -1,32 +1,32 @@ #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionByAddr { #[prost(message, repeated, tag = "1")] - pub tx_by_addrs: 
::std::vec::Vec, + pub tx_by_addrs: ::prost::alloc::vec::Vec, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionByAddrInfo { - #[prost(bytes, tag = "1")] - pub signature: std::vec::Vec, + #[prost(bytes = "vec", tag = "1")] + pub signature: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "2")] - pub err: ::std::option::Option, + pub err: ::core::option::Option, #[prost(uint32, tag = "3")] pub index: u32, #[prost(message, optional, tag = "4")] - pub memo: ::std::option::Option, + pub memo: ::core::option::Option, #[prost(message, optional, tag = "5")] - pub block_time: ::std::option::Option, + pub block_time: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Memo { #[prost(string, tag = "1")] - pub memo: std::string::String, + pub memo: ::prost::alloc::string::String, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionError { #[prost(enumeration = "TransactionErrorType", tag = "1")] pub transaction_error: i32, #[prost(message, optional, tag = "2")] - pub instruction_error: ::std::option::Option, + pub instruction_error: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct InstructionError { @@ -35,7 +35,7 @@ pub struct InstructionError { #[prost(enumeration = "InstructionErrorType", tag = "2")] pub error: i32, #[prost(message, optional, tag = "3")] - pub custom: ::std::option::Option, + pub custom: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnixTimestamp { @@ -66,6 +66,7 @@ pub enum TransactionErrorType { InvalidProgramForExecution = 13, SanitizeFailure = 14, ClusterMaintenance = 15, + AccountBorrowOutstandingTx = 16, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -117,4 +118,7 @@ pub enum InstructionErrorType { BorshIoError = 44, AccountNotRentExempt = 45, InvalidAccountOwner = 46, + ArithmeticOverflow = 47, + UnsupportedSysvar = 48, + IllegalOwner = 49, } 
diff --git a/storage-proto/src/confirmed_block.proto b/storage-proto/src/confirmed_block.proto index d492a359d6..6e0d88f3cf 100644 --- a/storage-proto/src/confirmed_block.proto +++ b/storage-proto/src/confirmed_block.proto @@ -9,6 +9,7 @@ message ConfirmedBlock { repeated ConfirmedTransaction transactions = 4; repeated Reward rewards = 5; UnixTimestamp block_time = 6; + BlockHeight block_height = 7; } message ConfirmedTransaction { @@ -43,6 +44,7 @@ message TransactionStatusMeta { repeated string log_messages = 6; repeated TokenBalance pre_token_balances = 7; repeated TokenBalance post_token_balances = 8; + repeated Reward rewards = 9; } message TransactionError { @@ -95,3 +97,7 @@ message Rewards { message UnixTimestamp { int64 timestamp = 1; } + +message BlockHeight { + uint64 block_height = 1; +} diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 8520e14314..1039cec8db 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -125,6 +125,7 @@ impl From for generated::ConfirmedBlock { transactions, rewards, block_time, + block_height, } = confirmed_block; Self { @@ -134,6 +135,7 @@ impl From for generated::ConfirmedBlock { transactions: transactions.into_iter().map(|tx| tx.into()).collect(), rewards: rewards.into_iter().map(|r| r.into()).collect(), block_time: block_time.map(|timestamp| generated::UnixTimestamp { timestamp }), + block_height: block_height.map(|block_height| generated::BlockHeight { block_height }), } } } @@ -150,6 +152,7 @@ impl TryFrom for ConfirmedBlock { transactions, rewards, block_time, + block_height, } = confirmed_block; Ok(Self { @@ -162,6 +165,7 @@ impl TryFrom for ConfirmedBlock { .collect::, Self::Error>>()?, rewards: rewards.into_iter().map(|r| r.into()).collect(), block_time: block_time.map(|generated::UnixTimestamp { timestamp }| timestamp), + block_height: block_height.map(|generated::BlockHeight { block_height }| block_height), }) } } @@ -274,6 +278,7 @@ impl From for 
generated::TransactionStatusMeta { log_messages, pre_token_balances, post_token_balances, + rewards, } = value; let err = match status { Ok(()) => None, @@ -297,6 +302,11 @@ impl From for generated::TransactionStatusMeta { .into_iter() .map(|balance| balance.into()) .collect(); + let rewards = rewards + .unwrap_or_default() + .into_iter() + .map(|reward| reward.into()) + .collect(); Self { err, @@ -307,6 +317,7 @@ impl From for generated::TransactionStatusMeta { log_messages, pre_token_balances, post_token_balances, + rewards, } } } @@ -331,6 +342,7 @@ impl TryFrom for TransactionStatusMeta { log_messages, pre_token_balances, post_token_balances, + rewards, } = value; let status = match &err { None => Ok(()), @@ -355,6 +367,7 @@ impl TryFrom for TransactionStatusMeta { .map(|balance| balance.into()) .collect(), ); + let rewards = Some(rewards.into_iter().map(|reward| reward.into()).collect()); Ok(Self { status, fee, @@ -364,6 +377,7 @@ impl TryFrom for TransactionStatusMeta { log_messages, pre_token_balances, post_token_balances, + rewards, }) } } @@ -508,6 +522,8 @@ impl TryFrom for TransactionError { 44 => InstructionError::BorshIoError(String::new()), 45 => InstructionError::AccountNotRentExempt, 46 => InstructionError::InvalidAccountOwner, + 47 => InstructionError::ArithmeticOverflow, + 48 => InstructionError::UnsupportedSysvar, _ => return Err("Invalid InstructionError"), }; @@ -534,6 +550,7 @@ impl TryFrom for TransactionError { 13 => TransactionError::InvalidProgramForExecution, 14 => TransactionError::SanitizeFailure, 15 => TransactionError::ClusterMaintenance, + 16 => TransactionError::AccountBorrowOutstanding, _ => return Err("Invalid TransactionError"), }) } @@ -589,6 +606,9 @@ impl From for tx_by_addr::TransactionError { TransactionError::InstructionError(_, _) => { tx_by_addr::TransactionErrorType::InstructionError } + TransactionError::AccountBorrowOutstanding => { + tx_by_addr::TransactionErrorType::AccountBorrowOutstandingTx + } } as i32, 
instruction_error: match transaction_error { TransactionError::InstructionError(index, ref instruction_error) => { @@ -734,6 +754,15 @@ impl From for tx_by_addr::TransactionError { InstructionError::InvalidAccountOwner => { tx_by_addr::InstructionErrorType::InvalidAccountOwner } + InstructionError::ArithmeticOverflow => { + tx_by_addr::InstructionErrorType::ArithmeticOverflow + } + InstructionError::UnsupportedSysvar => { + tx_by_addr::InstructionErrorType::UnsupportedSysvar + } + InstructionError::IllegalOwner => { + tx_by_addr::InstructionErrorType::IllegalOwner + } } as i32, custom: match instruction_error { InstructionError::Custom(custom) => { @@ -935,6 +964,7 @@ impl From for generated_evm::TransactionReceipt { impl TryFrom for evm_state::TransactionReceipt { type Error = &'static str; + fn try_from(tx: generated_evm::TransactionReceipt) -> Result { let logs: Result, _> = tx.logs.into_iter().map(TryFrom::try_from).collect(); Ok(Self { diff --git a/storage-proto/src/lib.rs b/storage-proto/src/lib.rs index 1a4148987f..a132a8797f 100644 --- a/storage-proto/src/lib.rs +++ b/storage-proto/src/lib.rs @@ -150,6 +150,8 @@ pub struct StoredTransactionStatusMeta { pub pre_token_balances: Option>, #[serde(deserialize_with = "default_on_eof")] pub post_token_balances: Option>, + #[serde(deserialize_with = "default_on_eof")] + pub rewards: Option>, } impl From for TransactionStatusMeta { @@ -163,6 +165,7 @@ impl From for TransactionStatusMeta { log_messages, pre_token_balances, post_token_balances, + rewards, } = value; Self { status, @@ -175,6 +178,8 @@ impl From for TransactionStatusMeta { .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), post_token_balances: post_token_balances .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), + rewards: rewards + .map(|rewards| rewards.into_iter().map(|reward| reward.into()).collect()), } } } @@ -190,6 +195,7 @@ impl From for StoredTransactionStatusMeta { log_messages, 
pre_token_balances, post_token_balances, + rewards, } = value; Self { status, @@ -202,6 +208,8 @@ impl From for StoredTransactionStatusMeta { .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), post_token_balances: post_token_balances .map(|balances| balances.into_iter().map(|balance| balance.into()).collect()), + rewards: rewards + .map(|rewards| rewards.into_iter().map(|reward| reward.into()).collect()), } } } diff --git a/storage-proto/src/transaction_by_addr.proto b/storage-proto/src/transaction_by_addr.proto index ca46b09605..7394a3aa92 100644 --- a/storage-proto/src/transaction_by_addr.proto +++ b/storage-proto/src/transaction_by_addr.proto @@ -30,7 +30,7 @@ enum TransactionErrorType { PROGRAM_ACCOUNT_NOT_FOUND = 3; INSUFFICIENT_FUNDS_FOR_FEE = 4; INVALID_ACCOUNT_FOR_FEE = 5; - DUPLICATE_SIGNATURE = 6; + ALREADY_PROCESSED = 6; BLOCKHASH_NOT_FOUND = 7; INSTRUCTION_ERROR = 8; CALL_CHAIN_TOO_DEEP = 9; @@ -40,6 +40,7 @@ enum TransactionErrorType { INVALID_PROGRAM_FOR_EXECUTION = 13; SANITIZE_FAILURE = 14; CLUSTER_MAINTENANCE = 15; + ACCOUNT_BORROW_OUTSTANDING_TX = 16; } message InstructionError { @@ -96,6 +97,8 @@ enum InstructionErrorType { BORSH_IO_ERROR = 44; ACCOUNT_NOT_RENT_EXEMPT = 45; INVALID_ACCOUNT_OWNER = 46; + ARITHMETIC_OVERFLOW = 47; + UNSUPPORTED_SYSVAR = 48; } message UnixTimestamp { diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 608a699719..4a605c871d 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-streamer" -version = "1.5.19" +version = "1.6.14" description = "Solana Streamer" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,14 +11,14 @@ edition = "2018" [dependencies] log = "0.4.11" -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = 
"=1.6.14" } thiserror = "1.0" -solana-measure = { path = "../measure", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } +solana-measure = { path = "../measure", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } libc = "0.2.81" nix = "0.19.0" -solana-perf = { path = "../perf", version = "=1.5.19" } +solana-perf = { path = "../perf", version = "=1.6.14" } [dev-dependencies] diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index a9af67e505..bfc486daf3 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-sys-tuner" description = "The solana cluster system tuner daemon" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,9 +14,9 @@ publish = true clap = "2.33.1" log = "0.4.11" libc = "0.2.81" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" diff --git a/system-test/automation_utils.sh b/system-test/automation_utils.sh index bdf2263c27..e8b0171330 100755 --- a/system-test/automation_utils.sh +++ b/system-test/automation_utils.sh @@ -140,6 +140,18 @@ function collect_performance_statistics { --data-urlencode "db=${TESTNET_TAG}" \ --data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_confirmation;$q_max_confirmation;$q_99th_confirmation;$q_max_tower_distance_observed;$q_last_tower_distance_observed" | python "${REPO_ROOT}"/system-test/testnet-automation-json-parser.py >>"$RESULT_FILE" + + declare q_dropped_vote_hash_count=' + SELECT sum("count") 
as "sum_dropped_vote_hash" + FROM "'$TESTNET_TAG'"."autogen"."dropped-vote-hash" + WHERE time > now() - '"$TEST_DURATION_SECONDS"'s' + + # store in variable to be returned + dropped_vote_hash_count=$( \ + curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \ + --data-urlencode "db=${TESTNET_TAG}" \ + --data-urlencode "q=$q_dropped_vote_hash_count" | + python "${REPO_ROOT}"/system-test/testnet-automation-json-parser-missing.py) } function upload_results_to_slack() { diff --git a/system-test/stake-operations-testcases/offline_stake_operations.sh b/system-test/stake-operations-testcases/offline_stake_operations.sh index 7635ee2eb4..513a168ab2 100755 --- a/system-test/stake-operations-testcases/offline_stake_operations.sh +++ b/system-test/stake-operations-testcases/offline_stake_operations.sh @@ -29,7 +29,7 @@ if [[ -n "$1" ]]; then fi if [[ -z "$url" ]]; then - echo Provide complete URL, ex: "$0" http://devnet.solana.com:8899 + echo Provide complete URL, ex: "$0" http://api.devnet.solana.com:8899 exit 1 fi solana config set --url $url diff --git a/system-test/testnet-automation-json-parser-missing.py b/system-test/testnet-automation-json-parser-missing.py new file mode 100644 index 0000000000..47790c956f --- /dev/null +++ b/system-test/testnet-automation-json-parser-missing.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +import sys, json + +data=json.load(sys.stdin) + +# this code is designed for influx queries where 'no data' means 0 +if 'results' in data: + for result in data['results']: + val = "0" + if 'series' in result: + val = str(result['series'][0]['values'][0][1]) + print(val) +else: + print("No results returned from CURL request") diff --git a/system-test/testnet-automation.sh b/system-test/testnet-automation.sh index 251a62b635..3c12e26641 100755 --- a/system-test/testnet-automation.sh +++ b/system-test/testnet-automation.sh @@ -197,8 +197,15 @@ function launch_testnet() { execution_step "Average slot rate: $SLOTS_PER_SECOND slots/second over 
$((SLOT_COUNT_END_SECONDS - SLOT_COUNT_START_SECONDS)) seconds" if [[ "$SKIP_PERF_RESULTS" = "false" ]]; then + declare -g dropped_vote_hash_count + collect_performance_statistics echo "slots_per_second: $SLOTS_PER_SECOND" >>"$RESULT_FILE" + + if [[ $dropped_vote_hash_count -gt 0 ]]; then + execution_step "Checking for dropped vote hash count" + exit 1 + fi fi RESULT_DETAILS=$(<"$RESULT_FILE") diff --git a/tokens/Cargo.toml b/tokens/Cargo.toml index 3efb5391ad..6381bb7105 100644 --- a/tokens/Cargo.toml +++ b/tokens/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-tokens" description = "Blockchain, Rebuilt for Scale" authors = ["Solana Maintainers "] edition = "2018" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -20,23 +20,23 @@ indexmap = "1.5.1" indicatif = "0.15.0" pickledb = "0.4.1" serde = { version = "1.0", features = ["derive"] } -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-transaction-status = { path = "../transaction-status", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.6.14" } +solana-runtime = { path 
= "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-transaction-status = { path = "../transaction-status", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } spl-associated-token-account-v1-0 = { package = "spl-associated-token-account", version = "=1.0.2" } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] } tempfile = "3.1.0" thiserror = "1.0" [dev-dependencies] bincode = "1.3.1" -solana-core = { path = "../core", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-program-test = { path = "../program-test", version = "=1.5.19" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-program-test = { path = "../program-test", version = "=1.6.14" } diff --git a/tokens/src/arg_parser.rs b/tokens/src/arg_parser.rs index 708e0b52f7..0df4ae8f53 100644 --- a/tokens/src/arg_parser.rs +++ b/tokens/src/arg_parser.rs @@ -37,7 +37,7 @@ where .global(true) .takes_value(true) .value_name("URL") - .help("RPC entrypoint address. i.e. http://devnet.solana.com"), + .help("RPC entrypoint address. i.e. 
http://api.devnet.solana.com"), ) .subcommand( SubCommand::with_name("distribute-tokens") diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index ddb2dfa0e9..866ab03e3e 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -715,6 +715,7 @@ fn check_payer_balances( (args.sender_keypair.pubkey(), None) }; + let fee_payer_balance = client.get_balance(&args.fee_payer.pubkey())?; if let Some((unlocked_sol_source, total_unlocked_sol)) = unlocked_sol_source { let staker_balance = client.get_balance(&distribution_source)?; if staker_balance < undistributed_tokens { @@ -724,15 +725,13 @@ fn check_payer_balances( )); } if args.fee_payer.pubkey() == unlocked_sol_source { - let balance = client.get_balance(&args.fee_payer.pubkey())?; - if balance < fees + total_unlocked_sol { + if fee_payer_balance < fees + total_unlocked_sol { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount, FundingSource::FeePayer].into(), lamports_to_sol(fees + total_unlocked_sol).to_string(), )); } } else { - let fee_payer_balance = client.get_balance(&args.fee_payer.pubkey())?; if fee_payer_balance < fees { return Err(Error::InsufficientFunds( vec![FundingSource::FeePayer].into(), @@ -748,15 +747,13 @@ fn check_payer_balances( } } } else if args.fee_payer.pubkey() == distribution_source { - let balance = client.get_balance(&args.fee_payer.pubkey())?; - if balance < fees + undistributed_tokens { + if fee_payer_balance < fees + undistributed_tokens { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount, FundingSource::FeePayer].into(), lamports_to_sol(fees + undistributed_tokens).to_string(), )); } } else { - let fee_payer_balance = client.get_balance(&args.fee_payer.pubkey())?; if fee_payer_balance < fees { return Err(Error::InsufficientFunds( vec![FundingSource::FeePayer].into(), @@ -1287,7 +1284,7 @@ mod tests { )); // Same recipient, same lockups } - const SET_LOCKUP_INDEX: usize = 4; + const SET_LOCKUP_INDEX: usize = 5; #[test] fn 
test_set_stake_lockup() { diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 12d89ce6cc..efd029d392 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-transaction-status" -version = "1.5.19" +version = "1.6.14" description = "Solana transaction status types" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,18 +15,17 @@ bincode = "1.3.1" bs58 = "0.3.1" Inflector = "0.11.4" lazy_static = "1.4.0" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" serde_json = "1.0.56" -solana-account-decoder = { path = "../account-decoder", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-stake-program = { path = "../programs/stake", version = "=1.5.19" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +solana-account-decoder = { path = "../account-decoder", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-stake-program = { path = "../programs/stake", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } spl-associated-token-account-v1-0 = { package = "spl-associated-token-account", version = "=1.0.2", features = ["no-entrypoint"] } -spl-memo-v1-0 = { package = "spl-memo", version = "=2.0.1", features = ["no-entrypoint"] } -spl-memo-v3-0 = { package = "spl-memo", version = "=3.0.0", features = ["no-entrypoint"] } -spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] } +spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } +spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] } thiserror = "1.0" [package.metadata.docs.rs] diff --git a/transaction-status/src/lib.rs 
b/transaction-status/src/lib.rs index 942e581326..84a90d5317 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -185,6 +185,8 @@ pub struct TransactionStatusMeta { pub pre_token_balances: Option>, #[serde(deserialize_with = "default_on_eof")] pub post_token_balances: Option>, + #[serde(deserialize_with = "default_on_eof")] + pub rewards: Option, } impl Default for TransactionStatusMeta { @@ -198,6 +200,7 @@ impl Default for TransactionStatusMeta { log_messages: None, pre_token_balances: None, post_token_balances: None, + rewards: None, } } } @@ -215,6 +218,7 @@ pub struct UiTransactionStatusMeta { pub log_messages: Option>, pub pre_token_balances: Option>, pub post_token_balances: Option>, + pub rewards: Option, } impl UiTransactionStatusMeta { @@ -237,6 +241,7 @@ impl UiTransactionStatusMeta { post_token_balances: meta .post_token_balances .map(|balance| balance.into_iter().map(|balance| balance.into()).collect()), + rewards: meta.rewards, } } } @@ -259,6 +264,7 @@ impl From for UiTransactionStatusMeta { post_token_balances: meta .post_token_balances .map(|balance| balance.into_iter().map(|balance| balance.into()).collect()), + rewards: meta.rewards, } } } @@ -346,6 +352,7 @@ pub struct ConfirmedBlock { pub transactions: Vec, pub rewards: Rewards, pub block_time: Option, + pub block_height: Option, } impl ConfirmedBlock { @@ -361,6 +368,7 @@ impl ConfirmedBlock { .collect(), rewards: self.rewards, block_time: self.block_time, + block_height: self.block_height, } } @@ -403,6 +411,7 @@ impl ConfirmedBlock { None }, block_time: self.block_time, + block_height: self.block_height, } } } @@ -416,6 +425,7 @@ pub struct EncodedConfirmedBlock { pub transactions: Vec, pub rewards: Rewards, pub block_time: Option, + pub block_height: Option, } #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -431,6 +441,7 @@ pub struct UiConfirmedBlock { #[serde(default, skip_serializing_if = "Option::is_none")] pub rewards: Option, pub block_time: 
Option, + pub block_height: Option, } impl From for UiConfirmedBlock { @@ -443,6 +454,7 @@ impl From for UiConfirmedBlock { signatures: None, rewards: Some(block.rewards), block_time: block.block_time, + block_height: block.block_height, } } } @@ -456,6 +468,7 @@ impl From for EncodedConfirmedBlock { transactions: block.transactions.unwrap_or_default(), rewards: block.rewards.unwrap_or_default(), block_time: block.block_time, + block_height: block.block_height, } } } diff --git a/transaction-status/src/parse_accounts.rs b/transaction-status/src/parse_accounts.rs index ebef018e71..20bd74a56b 100644 --- a/transaction-status/src/parse_accounts.rs +++ b/transaction-status/src/parse_accounts.rs @@ -13,7 +13,7 @@ pub fn parse_accounts(message: &Message) -> Vec { for (i, account_key) in message.account_keys.iter().enumerate() { accounts.push(ParsedAccount { pubkey: account_key.to_string(), - writable: message.is_writable(i), + writable: message.is_writable(i, /*demote_sysvar_write_locks=*/ true), signer: message.is_signer(i), }); } diff --git a/transaction-status/src/parse_associated_token.rs b/transaction-status/src/parse_associated_token.rs index a50268a00d..c2c47c8358 100644 --- a/transaction-status/src/parse_associated_token.rs +++ b/transaction-status/src/parse_associated_token.rs @@ -3,12 +3,11 @@ use crate::parse_instruction::{ }; use serde_json::json; use solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey}; -use std::str::FromStr; // A helper function to convert spl_associated_token_account_v1_0::id() as spl_sdk::pubkey::Pubkey // to solana_sdk::pubkey::Pubkey pub fn spl_associated_token_id_v1_0() -> Pubkey { - Pubkey::from_str(&spl_associated_token_account_v1_0::id().to_string()).unwrap() + Pubkey::new_from_array(spl_associated_token_account_v1_0::id().to_bytes()) } pub fn parse_associated_token( @@ -58,7 +57,7 @@ mod test { }; fn convert_pubkey(pubkey: Pubkey) -> SplAssociatedTokenPubkey { - 
SplAssociatedTokenPubkey::from_str(&pubkey.to_string()).unwrap() + SplAssociatedTokenPubkey::new_from_array(pubkey.to_bytes()) } fn convert_compiled_instruction( diff --git a/transaction-status/src/parse_instruction.rs b/transaction-status/src/parse_instruction.rs index dea1c2133e..4e2874ee54 100644 --- a/transaction-status/src/parse_instruction.rs +++ b/transaction-status/src/parse_instruction.rs @@ -10,20 +10,15 @@ use inflector::Inflector; use serde_json::Value; use solana_account_decoder::parse_token::spl_token_id_v2_0; use solana_sdk::{instruction::CompiledInstruction, pubkey::Pubkey, system_program}; -use std::{ - collections::HashMap, - str::{from_utf8, FromStr}, -}; +use std::{collections::HashMap, str::from_utf8}; use thiserror::Error; lazy_static! { static ref ASSOCIATED_TOKEN_PROGRAM_ID: Pubkey = spl_associated_token_id_v1_0(); static ref BPF_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader::id(); static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id(); - static ref MEMO_V1_PROGRAM_ID: Pubkey = - Pubkey::from_str(&spl_memo_v1_0::id().to_string()).unwrap(); - static ref MEMO_V3_PROGRAM_ID: Pubkey = - Pubkey::from_str(&spl_memo_v3_0::id().to_string()).unwrap(); + static ref MEMO_V1_PROGRAM_ID: Pubkey = Pubkey::new_from_array(spl_memo::v1::id().to_bytes()); + static ref MEMO_V3_PROGRAM_ID: Pubkey = Pubkey::new_from_array(spl_memo::id().to_bytes()); static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id(); static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id(); static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0(); diff --git a/transaction-status/src/parse_stake.rs b/transaction-status/src/parse_stake.rs index a0f4ed7d81..1f733459e3 100644 --- a/transaction-status/src/parse_stake.rs +++ b/transaction-status/src/parse_stake.rs @@ -309,21 +309,25 @@ mod test { ); assert!(parse_stake(&message.instructions[0], &keys[0..5]).is_err()); - let instructions = stake_instruction::split(&keys[2], &keys[0], 
lamports, &keys[1]); + // This looks wrong, but in an actual compiled instruction, the order is: + // * split account (signer, allocate + assign first) + // * stake authority (signer) + // * stake account + let instructions = stake_instruction::split(&keys[2], &keys[1], lamports, &keys[0]); let message = Message::new(&instructions, None); assert_eq!( - parse_stake(&message.instructions[1], &keys[0..3]).unwrap(), + parse_stake(&message.instructions[2], &keys[0..3]).unwrap(), ParsedInstructionEnum { instruction_type: "split".to_string(), info: json!({ "stakeAccount": keys[2].to_string(), - "newSplitAccount": keys[1].to_string(), - "stakeAuthority": keys[0].to_string(), + "newSplitAccount": keys[0].to_string(), + "stakeAuthority": keys[1].to_string(), "lamports": lamports, }), } ); - assert!(parse_stake(&message.instructions[1], &keys[0..2]).is_err()); + assert!(parse_stake(&message.instructions[2], &keys[0..2]).is_err()); let instruction = stake_instruction::withdraw(&keys[1], &keys[0], &keys[2], lamports, None); let message = Message::new(&[instruction], None); diff --git a/transaction-status/src/token_balances.rs b/transaction-status/src/token_balances.rs index 9153abb43b..ab3bfa87ed 100644 --- a/transaction-status/src/token_balances.rs +++ b/transaction-status/src/token_balances.rs @@ -3,7 +3,7 @@ use solana_account_decoder::parse_token::{ spl_token_id_v2_0, spl_token_v2_0_native_mint, token_amount_to_ui_amount, UiTokenAmount, }; use solana_runtime::{bank::Bank, transaction_batch::TransactionBatch}; -use solana_sdk::pubkey::Pubkey; +use solana_sdk::{account::ReadableAccount, pubkey::Pubkey}; use spl_token_v2_0::{ solana_program::program_pack::Pack, state::{Account as TokenAccount, Mint}, @@ -40,7 +40,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option { } else { let mint_account = bank.get_account(mint)?; - let decimals = Mint::unpack(&mint_account.data) + let decimals = Mint::unpack(&mint_account.data()) .map(|mint| mint.decimals) .ok()?; @@ -91,7 
+91,7 @@ pub fn collect_token_balance_from_account( ) -> Option<(String, UiTokenAmount)> { let account = bank.get_account(account_id)?; - let token_account = TokenAccount::unpack(&account.data).ok()?; + let token_account = TokenAccount::unpack(&account.data()).ok()?; let mint_string = &token_account.mint.to_string(); let mint = &Pubkey::from_str(&mint_string).unwrap_or_default(); diff --git a/upload-perf/Cargo.toml b/upload-perf/Cargo.toml index 8d1a20db71..8606edf535 100644 --- a/upload-perf/Cargo.toml +++ b/upload-perf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-upload-perf" -version = "1.5.19" +version = "1.6.14" description = "Metrics Upload Utility" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ publish = false [dependencies] serde_json = "1.0.56" -solana-metrics = { path = "../metrics", version = "=1.5.19" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } [[bin]] name = "solana-upload-perf" diff --git a/validator/Cargo.toml b/validator/Cargo.toml index cdec38662e..8a969eb748 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -3,40 +3,45 @@ authors = ["Solana Maintainers "] edition = "2018" name = "velas-validator" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" -homepage = "https://solana.com/" +homepage = "https://velas.com/" default-run = "velas-validator" [dependencies] base64 = "0.12.3" bincode = "1.3.1" -clap = "2.33.1" chrono = { version = "0.4.11", features = ["serde"] } +clap = "2.33.1" console = "0.11.3" core_affinity = "0.5.10" -fd-lock = "1.1.1" +fd-lock = "2.0.0" indicatif = "0.15.0" +jsonrpc-core = "17.1.0" +jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] } +jsonrpc-derive = "17.1.0" +jsonrpc-ipc-server = "17.1.0" +jsonrpc-server-utils= "17.1.0" log = "0.4.11" num_cpus = "1.13.0" rand = "0.7.0" -serde_json = "1.0.56" 
-solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-core = { path = "../core", version = "=1.5.19" } -solana-download-utils = { path = "../download-utils", version = "=1.5.19" } -solana-faucet = { path = "../faucet", version = "=1.5.19" } -solana-ledger = { path = "../ledger", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-perf = { path = "../perf", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-net-utils = { path = "../net-utils", version = "=1.5.19" } -solana-runtime = { path = "../runtime", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } -solana-vote-program = { path = "../programs/vote", version = "=1.5.19" } +serde = "1.0.112" +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } +solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-core = { path = "../core", version = "=1.6.14" } +solana-download-utils = { path = "../download-utils", version = "=1.6.14" } +solana-faucet = { path = "../faucet", version = "=1.6.14" } +solana-ledger = { path = "../ledger", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-net-utils = { path = "../net-utils", version = "=1.6.14" } +solana-perf = { path = "../perf", version = "=1.6.14" } +solana-runtime = { path = "../runtime", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } +solana-vote-program = { path = "../programs/vote", version = "=1.6.14" } symlink = "0.1.0" [target."cfg(unix)".dependencies] diff --git 
a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs new file mode 100644 index 0000000000..90bb631f08 --- /dev/null +++ b/validator/src/admin_rpc_service.rs @@ -0,0 +1,195 @@ +use { + jsonrpc_core::{MetaIoHandler, Metadata, Result}, + jsonrpc_core_client::{transports::ipc, RpcError}, + jsonrpc_derive::rpc, + jsonrpc_ipc_server::{RequestContext, ServerBuilder}, + jsonrpc_server_utils::tokio, + log::*, + solana_core::validator::{ValidatorExit, ValidatorStartProgress}, + solana_sdk::signature::{read_keypair_file, Keypair, Signer}, + std::{ + net::SocketAddr, + path::Path, + sync::{Arc, RwLock}, + thread::{self, Builder}, + time::{Duration, SystemTime}, + }, +}; + +#[derive(Clone)] +pub struct AdminRpcRequestMetadata { + pub rpc_addr: Option, + pub start_time: SystemTime, + pub start_progress: Arc>, + pub validator_exit: Arc>, + pub authorized_voter_keypairs: Arc>>>, +} +impl Metadata for AdminRpcRequestMetadata {} + +#[rpc] +pub trait AdminRpc { + type Metadata; + + #[rpc(meta, name = "exit")] + fn exit(&self, meta: Self::Metadata) -> Result<()>; + + #[rpc(meta, name = "rpcAddress")] + fn rpc_addr(&self, meta: Self::Metadata) -> Result>; + + #[rpc(name = "setLogFilter")] + fn set_log_filter(&self, filter: String) -> Result<()>; + + #[rpc(meta, name = "startTime")] + fn start_time(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "startProgress")] + fn start_progress(&self, meta: Self::Metadata) -> Result; + + #[rpc(meta, name = "addAuthorizedVoter")] + fn add_authorized_voter(&self, meta: Self::Metadata, keypair_file: String) -> Result<()>; + + #[rpc(meta, name = "removeAllAuthorizedVoters")] + fn remove_all_authorized_voters(&self, meta: Self::Metadata) -> Result<()>; +} + +pub struct AdminRpcImpl; +impl AdminRpc for AdminRpcImpl { + type Metadata = AdminRpcRequestMetadata; + + fn exit(&self, meta: Self::Metadata) -> Result<()> { + debug!("exit admin rpc request received"); + + thread::spawn(move || { + // Delay exit signal until 
this RPC request completes, otherwise the caller of `exit` might + // receive a confusing error as the validator shuts down before a response is sent back. + thread::sleep(Duration::from_millis(100)); + + warn!("validator exit requested"); + meta.validator_exit.write().unwrap().exit(); + + // TODO: Debug why ValidatorExit doesn't always cause the validator to fully exit + // (rocksdb background processing or some other stuck thread perhaps?). + // + // If the process is still alive after five seconds, exit harder + thread::sleep(Duration::from_secs(5)); + warn!("validator exit timeout"); + std::process::exit(0); + }); + Ok(()) + } + + fn rpc_addr(&self, meta: Self::Metadata) -> Result> { + debug!("rpc_addr admin rpc request received"); + Ok(meta.rpc_addr) + } + + fn set_log_filter(&self, filter: String) -> Result<()> { + debug!("set_log_filter admin rpc request received"); + solana_logger::setup_with(&filter); + Ok(()) + } + + fn start_time(&self, meta: Self::Metadata) -> Result { + debug!("start_time admin rpc request received"); + Ok(meta.start_time) + } + + fn start_progress(&self, meta: Self::Metadata) -> Result { + debug!("start_progress admin rpc request received"); + Ok(*meta.start_progress.read().unwrap()) + } + + fn add_authorized_voter(&self, meta: Self::Metadata, keypair_file: String) -> Result<()> { + debug!("add_authorized_voter request received"); + + let authorized_voter = read_keypair_file(keypair_file) + .map_err(|err| jsonrpc_core::error::Error::invalid_params(format!("{}", err)))?; + + let mut authorized_voter_keypairs = meta.authorized_voter_keypairs.write().unwrap(); + + if authorized_voter_keypairs + .iter() + .any(|x| x.pubkey() == authorized_voter.pubkey()) + { + Err(jsonrpc_core::error::Error::invalid_params( + "Authorized voter already present", + )) + } else { + authorized_voter_keypairs.push(Arc::new(authorized_voter)); + Ok(()) + } + } + + fn remove_all_authorized_voters(&self, meta: Self::Metadata) -> Result<()> { + 
debug!("remove_all_authorized_voters received"); + let mut a = meta.authorized_voter_keypairs.write().unwrap(); + + error!("authorized_voter_keypairs pre len: {}", a.len()); + a.clear(); + error!("authorized_voter_keypairs post len: {}", a.len()); + + //meta.authorized_voter_keypairs.write().unwrap().clear(); + Ok(()) + } +} + +// Start the Admin RPC interface +pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) { + let admin_rpc_path = ledger_path.join("admin.rpc"); + + let event_loop = tokio::runtime::Builder::new() + .threaded_scheduler() + .enable_all() + .thread_name("sol-adminrpc-el") + .build() + .unwrap(); + + Builder::new() + .name("solana-adminrpc".to_string()) + .spawn(move || { + let mut io = MetaIoHandler::default(); + io.extend_with(AdminRpcImpl.to_delegate()); + + let validator_exit = metadata.validator_exit.clone(); + let server = ServerBuilder::with_meta_extractor(io, move |_req: &RequestContext| { + metadata.clone() + }) + .event_loop_executor(event_loop.handle().clone()) + .start(&format!("{}", admin_rpc_path.display())); + + match server { + Err(err) => { + warn!("Unable to start admin rpc service: {:?}", err); + } + Ok(server) => { + let close_handle = server.close_handle(); + validator_exit + .write() + .unwrap() + .register_exit(Box::new(move || { + close_handle.close(); + })); + + server.wait(); + } + } + }) + .unwrap(); +} + +// Connect to the Admin RPC interface +pub async fn connect(ledger_path: &Path) -> std::result::Result { + let admin_rpc_path = ledger_path.join("admin.rpc"); + if !admin_rpc_path.exists() { + Err(RpcError::Client(format!( + "{} does not exist", + admin_rpc_path.display() + ))) + } else { + ipc::connect::<_, gen_client::Client>(&format!("{}", admin_rpc_path.display())).await + } +} + +pub fn runtime() -> jsonrpc_server_utils::tokio::runtime::Runtime { + jsonrpc_server_utils::tokio::runtime::Runtime::new().expect("new tokio runtime") +} diff --git a/validator/src/bin/velas-test-validator.rs 
b/validator/src/bin/velas-test-validator.rs index a3b4b99823..f82df7e99e 100644 --- a/validator/src/bin/velas-test-validator.rs +++ b/validator/src/bin/velas-test-validator.rs @@ -1,24 +1,21 @@ -#![allow(clippy::integer_arithmetic)] use { clap::{value_t, value_t_or_exit, App, Arg}, - console::style, fd_lock::FdLock, - indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle}, solana_clap_utils::{ - input_parsers::{pubkey_of, pubkeys_of}, + input_parsers::{pubkey_of, pubkeys_of, value_of}, input_validators::{ is_pubkey, is_pubkey_or_keypair, is_slot, is_url_or_moniker, normalize_to_url_if_moniker, }, }, - solana_client::{client_error, rpc_client::RpcClient, rpc_request}, + solana_client::rpc_client::RpcClient, solana_core::rpc::JsonRpcConfig, solana_faucet::faucet::{run_local_faucet_with_port, FAUCET_PORT}, solana_sdk::{ - account::Account, - clock::{Slot, DEFAULT_TICKS_PER_SLOT, MS_PER_TICK}, - commitment_config::CommitmentConfig, - native_token::{sol_to_lamports, Sol}, + account::AccountSharedData, + clock::Slot, + epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, + native_token::sol_to_lamports, pubkey::Pubkey, rpc_port, signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, @@ -31,12 +28,22 @@ use { path::{Path, PathBuf}, process::exit, sync::mpsc::channel, - thread, - time::{Duration, Instant, SystemTime, UNIX_EPOCH}, + time::{Duration, SystemTime, UNIX_EPOCH}, + }, + velas_validator::{ + admin_rpc_service, dashboard::Dashboard, println_name_value, redirect_stderr_to_file, + test_validator::*, }, - velas_validator::{redirect_stderr_to_file, test_validator::*}, }; +/* 10,000 was derived empirically by watching the size + * of the rocksdb/ directory self-limit itself to the + * 40MB-150MB range when running `solana-test-validator` + */ +const DEFAULT_MAX_LEDGER_SHREDS: u64 = 10_000; + +const DEFAULT_FAUCET_SOL: f64 = 1_000_000.; + #[derive(PartialEq)] enum Output { None, @@ -44,25 +51,13 @@ enum Output { Dashboard, } -/// Creates a new 
process bar for processing that will take an unknown amount of time -fn new_spinner_progress_bar() -> ProgressBar { - let progress_bar = ProgressBar::new(42); - progress_bar.set_draw_target(ProgressDrawTarget::stdout()); - progress_bar - .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}")); - progress_bar.enable_steady_tick(100); - progress_bar -} - -/// Pretty print a "name value" -fn println_name_value(name: &str, value: &str) { - println!("{} {}", style(name).bold(), value); -} - fn main() { let default_rpc_port = rpc_port::DEFAULT_RPC_PORT.to_string(); + let default_faucet_port = FAUCET_PORT.to_string(); + let default_limit_ledger_size = DEFAULT_MAX_LEDGER_SHREDS.to_string(); + let default_faucet_sol = DEFAULT_FAUCET_SOL.to_string(); - let matches = App::new("velas-test-validator") + let matches = App::new("solana-test-validator") .about("Test Validator") .version(solana_version::version!()) .arg({ @@ -137,6 +132,15 @@ fn main() { .conflicts_with("quiet") .help("Log mode: stream the validator log"), ) + .arg( + Arg::with_name("faucet_port") + .long("faucet-port") + .value_name("PORT") + .takes_value(true) + .default_value(&default_faucet_port) + .validator(velas_validator::port_validator) + .help("Enable the faucet on this port"), + ) .arg( Arg::with_name("rpc_port") .long("rpc-port") @@ -144,7 +148,7 @@ fn main() { .takes_value(true) .default_value(&default_rpc_port) .validator(velas_validator::port_validator) - .help("Use this port for JSON RPC and the next port for the RPC websocket"), + .help("Enable JSON RPC on this port, and the next port for the RPC websocket"), ) .arg( Arg::with_name("bpf_program") @@ -164,6 +168,66 @@ fn main() { .takes_value(false) .help("Disable the just-in-time compiler and instead use the interpreter for BPF"), ) + .arg( + Arg::with_name("slots_per_epoch") + .long("slots-per-epoch") + .value_name("SLOTS") + .validator(|value| { + value + .parse::() + .map_err(|err| format!("error parsing '{}': {}", 
value, err)) + .and_then(|slot| { + if slot < MINIMUM_SLOTS_PER_EPOCH { + Err(format!("value must be >= {}", MINIMUM_SLOTS_PER_EPOCH)) + } else { + Ok(()) + } + }) + }) + .takes_value(true) + .help( + "Override the number of slots in an epoch. \ + If the ledger already exists then this parameter is silently ignored", + ), + ) + .arg( + Arg::with_name("gossip_port") + .long("gossip-port") + .value_name("PORT") + .takes_value(true) + .help("Gossip port number for the validator"), + ) + .arg( + Arg::with_name("gossip_host") + .long("gossip-host") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host) + .help( + "Gossip DNS name or IP address for the validator to advertise in gossip \ + [default: 127.0.0.1]", + ), + ) + .arg( + Arg::with_name("dynamic_port_range") + .long("dynamic-port-range") + .value_name("MIN_PORT-MAX_PORT") + .takes_value(true) + .validator(velas_validator::port_range_validator) + .help( + "Range to use for dynamically assigned ports \ + [default: 1024-65535]", + ), + ) + .arg( + Arg::with_name("bind_address") + .long("bind-address") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host) + .default_value("0.0.0.0") + .help("IP address to bind the validator ports [default: 0.0.0.0]"), + ) .arg( Arg::with_name("clone_account") .long("clone") @@ -195,6 +259,25 @@ fn main() { referenced by the --url argument will be used", ), ) + .arg( + Arg::with_name("limit_ledger_size") + .long("limit-ledger-size") + .value_name("SHRED_COUNT") + .takes_value(true) + .default_value(default_limit_ledger_size.as_str()) + .help("Keep this amount of shreds in root slots."), + ) + .arg( + Arg::with_name("faucet_sol") + .long("faucet-sol") + .takes_value(true) + .value_name("SOL") + .default_value(default_faucet_sol.as_str()) + .help( + "Give the faucet address this much SOL in genesis. 
\ + If the ledger already exists then this parameter is silently ignored", + ), + ) .get_matches(); let cli_config = if let Some(config_file) = matches.value_of("config_file") { @@ -207,11 +290,13 @@ fn main() { .map(normalize_to_url_if_moniker) .map(RpcClient::new); - let mint_address = pubkey_of(&matches, "mint_address").unwrap_or_else(|| { - read_keypair_file(&cli_config.keypair_path) - .unwrap_or_else(|_| Keypair::new()) - .pubkey() - }); + let (mint_address, random_mint) = pubkey_of(&matches, "mint_address") + .map(|pk| (pk, false)) + .unwrap_or_else(|| { + read_keypair_file(&cli_config.keypair_path) + .map(|kp| (kp.pubkey(), false)) + .unwrap_or_else(|_| (Keypair::new().pubkey(), true)) + }); let ledger_path = value_t_or_exit!(matches, "ledger_path", PathBuf); let reset_ledger = matches.is_present("reset"); @@ -223,9 +308,31 @@ fn main() { Output::Dashboard }; let rpc_port = value_t_or_exit!(matches, "rpc_port", u16); + let faucet_port = value_t_or_exit!(matches, "faucet_port", u16); + let slots_per_epoch = value_t!(matches, "slots_per_epoch", Slot).ok(); + let gossip_host = matches.value_of("gossip_host").map(|gossip_host| { + solana_net_utils::parse_host(gossip_host).unwrap_or_else(|err| { + eprintln!("Failed to parse --gossip-host: {}", err); + exit(1); + }) + }); + let gossip_port = value_t!(matches, "gossip_port", u16).ok(); + let dynamic_port_range = matches.value_of("dynamic_port_range").map(|port_range| { + solana_net_utils::parse_port_range(port_range).unwrap_or_else(|| { + eprintln!("Failed to parse --dynamic-port-range"); + exit(1); + }) + }); + let bind_address = matches.value_of("bind_address").map(|bind_address| { + solana_net_utils::parse_host(bind_address).unwrap_or_else(|err| { + eprintln!("Failed to parse --bind-address: {}", err); + exit(1); + }) + }); + let faucet_addr = Some(SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - FAUCET_PORT, + faucet_port, )); let bpf_jit = !matches.is_present("no_bpf_jit"); @@ -297,7 +404,7 @@ fn 
main() { let mut ledger_fd_lock = FdLock::new(fs::File::open(&ledger_path).unwrap()); let _ledger_lock = ledger_fd_lock.try_lock().unwrap_or_else(|_| { println!( - "Error: Unable to lock {} directory. Check if another velas-test-validator is running", + "Error: Unable to lock {} directory. Check if another validator is running", ledger_path.display() ); exit(1); @@ -336,7 +443,7 @@ fn main() { }; let _logger_thread = redirect_stderr_to_file(logfile); - let faucet_lamports = sol_to_lamports(1_000_000.); + let faucet_lamports = sol_to_lamports(value_of(&matches, "faucet_sol").unwrap()); let faucet_keypair_file = ledger_path.join("faucet-keypair.json"); if !faucet_keypair_file.exists() { write_keypair_file(&Keypair::new(), faucet_keypair_file.to_str().unwrap()).unwrap_or_else( @@ -350,6 +457,7 @@ fn main() { }, ); } + let faucet_keypair = read_keypair_file(faucet_keypair_file.to_str().unwrap()).unwrap_or_else(|err| { println!( @@ -359,57 +467,7 @@ fn main() { ); exit(1); }); - - let validator_start = Instant::now(); - - let test_validator = { - let _progress_bar = if output == Output::Dashboard { - println_name_value("Mint address:", &mint_address.to_string()); - println_name_value("Ledger location:", &format!("{}", ledger_path.display())); - println_name_value("Log:", &format!("{}", validator_log_symlink.display())); - let progress_bar = new_spinner_progress_bar(); - progress_bar.set_message("Initializing..."); - Some(progress_bar) - } else { - None - }; - - let mut genesis = TestValidatorGenesis::default(); - genesis - .ledger_path(&ledger_path) - .add_account( - faucet_keypair.pubkey(), - Account::new(faucet_lamports, 0, &system_program::id()), - ) - .rpc_config(JsonRpcConfig { - enable_validator_exit: true, - enable_rpc_transaction_history: true, - enable_cpi_and_log_storage: true, - faucet_addr, - ..JsonRpcConfig::default() - }) - .bpf_jit(bpf_jit) - .rpc_port(rpc_port) - .add_programs_with_path(&programs); - - if !clone_accounts.is_empty() { - 
genesis.clone_accounts( - clone_accounts, - cluster_rpc_client - .as_ref() - .expect("bug: --url argument missing?"), - ); - } - - if let Some(warp_slot) = warp_slot { - genesis.warp_slot(warp_slot); - } - genesis.start_with_mint_address(mint_address) - } - .unwrap_or_else(|err| { - println!("Error: failed to start validator: {}", err); - exit(1); - }); + let faucet_pubkey = faucet_keypair.pubkey(); if let Some(faucet_addr) = &faucet_addr { let (sender, receiver) = channel(); @@ -420,138 +478,129 @@ fn main() { }); } - if output == Output::Dashboard { - let rpc_client = test_validator.rpc_client().0; - let identity = &rpc_client.get_identity().expect("get_identity"); - println_name_value("Identity:", &identity.to_string()); + if TestValidatorGenesis::ledger_exists(&ledger_path) { + for (name, long) in &[ + ("bpf_program", "--bpf-program"), + ("clone_account", "--clone"), + ("mint_address", "--mint"), + ("slots_per_epoch", "--slots-per-epoch"), + ("faucet_sol", "--faucet-sol"), + ] { + if matches.is_present(name) { + println!("{} argument ignored, ledger already exists", long); + } + } + } else if random_mint { println_name_value( - "Version:", - &rpc_client.get_version().expect("get_version").solana_core, + "\nNotice!", + "No wallet available. 
`solana airdrop` localnet SOL after creating one\n", ); - println_name_value("JSON RPC URL:", &test_validator.rpc_url()); - println_name_value( - "JSON RPC PubSub Websocket:", - &test_validator.rpc_pubsub_url(), + } + + let mut genesis = TestValidatorGenesis::default(); + genesis.max_ledger_shreds = value_of(&matches, "limit_ledger_size"); + + admin_rpc_service::run( + &ledger_path, + admin_rpc_service::AdminRpcRequestMetadata { + rpc_addr: Some(SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + rpc_port, + )), + start_progress: genesis.start_progress.clone(), + start_time: std::time::SystemTime::now(), + validator_exit: genesis.validator_exit.clone(), + authorized_voter_keypairs: genesis.authorized_voter_keypairs.clone(), + }, + ); + let dashboard = if output == Output::Dashboard { + Some( + Dashboard::new( + &ledger_path, + Some(&validator_log_symlink), + Some(&mut genesis.validator_exit.write().unwrap()), + ) + .unwrap(), + ) + } else { + None + }; + + genesis + .ledger_path(&ledger_path) + .add_account( + faucet_pubkey, + AccountSharedData::new(faucet_lamports, 0, &system_program::id()), + ) + .rpc_config(JsonRpcConfig { + enable_rpc_transaction_history: true, + enable_cpi_and_log_storage: true, + faucet_addr, + ..JsonRpcConfig::default() + }) + .bpf_jit(bpf_jit) + .rpc_port(rpc_port) + .add_programs_with_path(&programs); + + if !clone_accounts.is_empty() { + genesis.clone_accounts( + clone_accounts, + cluster_rpc_client + .as_ref() + .expect("bug: --url argument missing?"), ); - println_name_value("Gossip Address:", &test_validator.gossip().to_string()); - println_name_value("TPU Address:", &test_validator.tpu().to_string()); - if let Some(faucet_addr) = &faucet_addr { - println_name_value( - "Faucet Address:", - &format!("{}:{}", &test_validator.gossip().ip(), faucet_addr.port()), - ); - } + } - let progress_bar = new_spinner_progress_bar(); - - fn get_validator_stats( - rpc_client: &RpcClient, - identity: &Pubkey, - ) -> 
client_error::Result<(Slot, Slot, Slot, u64, Sol, String)> { - let processed_slot = - rpc_client.get_slot_with_commitment(CommitmentConfig::processed())?; - let confirmed_slot = - rpc_client.get_slot_with_commitment(CommitmentConfig::confirmed())?; - let finalized_slot = - rpc_client.get_slot_with_commitment(CommitmentConfig::finalized())?; - let transaction_count = - rpc_client.get_transaction_count_with_commitment(CommitmentConfig::processed())?; - let identity_balance = rpc_client - .get_balance_with_commitment(identity, CommitmentConfig::confirmed())? - .value; - - let health = match rpc_client.get_health() { - Ok(()) => "ok".to_string(), - Err(err) => { - if let client_error::ClientErrorKind::RpcError( - rpc_request::RpcError::RpcResponseError { - code: _, - message: _, - data: - rpc_request::RpcResponseErrorData::NodeUnhealthy { - num_slots_behind: Some(num_slots_behind), - }, - original_err: _, - }, - ) = &err.kind - { - format!("{} slots behind", num_slots_behind) - } else { - "unhealthy".to_string() - } - } - }; - - Ok(( - processed_slot, - confirmed_slot, - finalized_slot, - transaction_count, - Sol(identity_balance), - health, - )) - } + if let Some(warp_slot) = warp_slot { + genesis.warp_slot(warp_slot); + } - loop { - let snapshot_slot = rpc_client.get_snapshot_slot().ok(); - - for _i in 0..10 { - match get_validator_stats(&rpc_client, &identity) { - Ok(( - processed_slot, - confirmed_slot, - finalized_slot, - transaction_count, - identity_balance, - health, - )) => { - let uptime = chrono::Duration::from_std(validator_start.elapsed()).unwrap(); - - progress_bar.set_message(&format!( - "{:02}:{:02}:{:02} \ - {}| \ - Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \ - Snapshot Slot: {} | \ - Transactions: {} | {}", - uptime.num_hours(), - uptime.num_minutes() % 60, - uptime.num_seconds() % 60, - if health == "ok" { - "".to_string() - } else { - format!("| {} ", style(health).bold().red()) - }, - processed_slot, - confirmed_slot, - 
finalized_slot, - snapshot_slot - .map(|s| s.to_string()) - .unwrap_or_else(|| "-".to_string()), - transaction_count, - identity_balance - )); - } - Err(err) => { - progress_bar.set_message(&format!("{}", err)); - } - } - thread::sleep(Duration::from_millis( - MS_PER_TICK * DEFAULT_TICKS_PER_SLOT / 2, - )); + if let Some(slots_per_epoch) = slots_per_epoch { + genesis.epoch_schedule(EpochSchedule::custom( + slots_per_epoch, + slots_per_epoch, + /* enable_warmup_epochs = */ false, + )); + } + + if let Some(gossip_host) = gossip_host { + genesis.gossip_host(gossip_host); + } + + if let Some(gossip_port) = gossip_port { + genesis.gossip_port(gossip_port); + } + + if let Some(dynamic_port_range) = dynamic_port_range { + genesis.port_range(dynamic_port_range); + } + + if let Some(bind_address) = bind_address { + genesis.bind_ip_addr(bind_address); + } + + match genesis.start_with_mint_address(mint_address) { + Ok(test_validator) => { + if let Some(dashboard) = dashboard { + dashboard.run(Duration::from_millis(250)); } + test_validator.join(); + } + Err(err) => { + drop(dashboard); + println!("Error: failed to start validator: {}", err); + exit(1); } } - - std::thread::park(); } fn remove_directory_contents(ledger_path: &Path) -> Result<(), io::Error> { for entry in fs::read_dir(&ledger_path)? { let entry = entry?; - if entry.metadata()?.is_file() { - fs::remove_file(&entry.path())? - } else { + if entry.metadata()?.is_dir() { fs::remove_dir_all(&entry.path())? + } else { + fs::remove_file(&entry.path())? 
} } Ok(()) diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs new file mode 100644 index 0000000000..777049e714 --- /dev/null +++ b/validator/src/dashboard.rs @@ -0,0 +1,293 @@ +use { + crate::{admin_rpc_service, new_spinner_progress_bar, println_name_value, ProgressBar}, + console::style, + solana_client::{ + client_error, rpc_client::RpcClient, rpc_request, rpc_response::RpcContactInfo, + }, + solana_core::validator::ValidatorStartProgress, + solana_sdk::{ + clock::Slot, commitment_config::CommitmentConfig, native_token::Sol, pubkey::Pubkey, + }, + std::{ + io, + net::SocketAddr, + path::{Path, PathBuf}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::{Duration, SystemTime}, + }, +}; + +pub struct Dashboard { + progress_bar: ProgressBar, + ledger_path: PathBuf, + exit: Arc, +} + +impl Dashboard { + pub fn new( + ledger_path: &Path, + log_path: Option<&Path>, + validator_exit: Option<&mut solana_core::validator::ValidatorExit>, + ) -> Result { + println_name_value("Ledger location:", &format!("{}", ledger_path.display())); + if let Some(log_path) = log_path { + println_name_value("Log:", &format!("{}", log_path.display())); + } + + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message("Initializing..."); + + let exit = Arc::new(AtomicBool::new(false)); + if let Some(validator_exit) = validator_exit { + let exit = exit.clone(); + validator_exit.register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); + } + + Ok(Self { + exit, + ledger_path: ledger_path.to_path_buf(), + progress_bar, + }) + } + + pub fn run(self, refresh_interval: Duration) { + let Self { + exit, + ledger_path, + progress_bar, + .. 
+ } = self; + drop(progress_bar); + + let mut runtime = admin_rpc_service::runtime(); + while !exit.load(Ordering::Relaxed) { + let progress_bar = new_spinner_progress_bar(); + progress_bar.set_message("Connecting..."); + + let (rpc_addr, start_time) = match runtime.block_on(wait_for_validator_startup( + &ledger_path, + &exit, + progress_bar, + refresh_interval, + )) { + None => continue, + Some(results) => results, + }; + + let rpc_client = RpcClient::new_socket(rpc_addr); + let identity = match rpc_client.get_identity() { + Ok(identity) => identity, + Err(err) => { + println!("Failed to get validator identity over RPC: {}", err); + continue; + } + }; + println_name_value("Identity:", &identity.to_string()); + + if let Ok(genesis_hash) = rpc_client.get_genesis_hash() { + println_name_value("Genesis Hash:", &genesis_hash.to_string()); + } + + if let Some(contact_info) = get_contact_info(&rpc_client, &identity) { + println_name_value( + "Version:", + &contact_info.version.unwrap_or_else(|| "?".to_string()), + ); + if let Some(shred_version) = contact_info.shred_version { + println_name_value("Shred Version:", &shred_version.to_string()); + } + if let Some(gossip) = contact_info.gossip { + println_name_value("Gossip Address:", &gossip.to_string()); + } + if let Some(tpu) = contact_info.tpu { + println_name_value("TPU Address:", &tpu.to_string()); + } + if let Some(rpc) = contact_info.rpc { + println_name_value("JSON RPC URL:", &format!("http://{}", rpc.to_string())); + } + } + + let progress_bar = new_spinner_progress_bar(); + let mut snapshot_slot = None; + for i in 0.. 
{ + if exit.load(Ordering::Relaxed) { + break; + } + if i % 10 == 0 { + snapshot_slot = rpc_client.get_snapshot_slot().ok(); + } + + match get_validator_stats(&rpc_client, &identity) { + Ok(( + max_retransmit_slot, + processed_slot, + confirmed_slot, + finalized_slot, + transaction_count, + identity_balance, + health, + )) => { + let uptime = { + let uptime = + chrono::Duration::from_std(start_time.elapsed().unwrap()).unwrap(); + + format!( + "{:02}:{:02}:{:02} ", + uptime.num_hours(), + uptime.num_minutes() % 60, + uptime.num_seconds() % 60 + ) + }; + + progress_bar.set_message(&format!( + "{}{}{}| \ + Processed Slot: {} | Confirmed Slot: {} | Finalized Slot: {} | \ + Snapshot Slot: {} | \ + Transactions: {} | {}", + uptime, + if health == "ok" { + "".to_string() + } else { + format!("| {} ", style(health).bold().red()) + }, + if max_retransmit_slot == 0 { + "".to_string() + } else { + format!("| Max Slot: {} ", max_retransmit_slot) + }, + processed_slot, + confirmed_slot, + finalized_slot, + snapshot_slot + .map(|s| s.to_string()) + .unwrap_or_else(|| "-".to_string()), + transaction_count, + identity_balance + )); + thread::sleep(refresh_interval); + } + Err(err) => { + progress_bar + .abandon_with_message(&format!("RPC connection failure: {}", err)); + break; + } + } + } + } + } +} + +async fn wait_for_validator_startup( + ledger_path: &Path, + exit: &Arc, + progress_bar: ProgressBar, + refresh_interval: Duration, +) -> Option<(SocketAddr, SystemTime)> { + let mut admin_client = None; + loop { + if exit.load(Ordering::Relaxed) { + return None; + } + + if admin_client.is_none() { + match admin_rpc_service::connect(&ledger_path).await { + Ok(new_admin_client) => admin_client = Some(new_admin_client), + Err(err) => { + progress_bar.set_message(&format!("Unable to connect to validator: {}", err)); + thread::sleep(refresh_interval); + continue; + } + } + } + + match admin_client.as_ref().unwrap().start_progress().await { + Ok(start_progress) => { + if start_progress 
== ValidatorStartProgress::Running { + let admin_client = admin_client.take().unwrap(); + + match async move { + let rpc_addr = admin_client.rpc_addr().await?; + let start_time = admin_client.start_time().await?; + Ok::<_, jsonrpc_core_client::RpcError>((rpc_addr, start_time)) + } + .await + { + Ok((None, _)) => progress_bar.set_message(&"RPC service not available"), + Ok((Some(rpc_addr), start_time)) => return Some((rpc_addr, start_time)), + Err(err) => { + progress_bar + .set_message(&format!("Failed to get validator info: {}", err)); + } + } + } else { + progress_bar + .set_message(&format!("Validator startup: {:?}...", start_progress)); + } + } + Err(err) => { + admin_client = None; + progress_bar + .set_message(&format!("Failed to get validator start progress: {}", err)); + } + } + thread::sleep(refresh_interval); + } +} + +fn get_contact_info(rpc_client: &RpcClient, identity: &Pubkey) -> Option { + rpc_client + .get_cluster_nodes() + .ok() + .unwrap_or_default() + .into_iter() + .find(|node| node.pubkey == identity.to_string()) +} + +fn get_validator_stats( + rpc_client: &RpcClient, + identity: &Pubkey, +) -> client_error::Result<(Slot, Slot, Slot, Slot, u64, Sol, String)> { + let finalized_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::finalized())?; + let confirmed_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::confirmed())?; + let processed_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::processed())?; + let max_retransmit_slot = rpc_client.get_max_retransmit_slot()?; + let transaction_count = + rpc_client.get_transaction_count_with_commitment(CommitmentConfig::processed())?; + let identity_balance = rpc_client + .get_balance_with_commitment(identity, CommitmentConfig::confirmed())? 
+ .value; + + let health = match rpc_client.get_health() { + Ok(()) => "ok".to_string(), + Err(err) => { + if let client_error::ClientErrorKind::RpcError( + rpc_request::RpcError::RpcResponseError { + data: + rpc_request::RpcResponseErrorData::NodeUnhealthy { + num_slots_behind: Some(num_slots_behind), + }, + .. + }, + ) = &err.kind + { + format!("{} slots behind", num_slots_behind) + } else { + "health unknown".to_string() + } + } + }; + + Ok(( + max_retransmit_slot, + processed_slot, + confirmed_slot, + finalized_slot, + transaction_count, + Sol(identity_balance), + health, + )) +} diff --git a/validator/src/lib.rs b/validator/src/lib.rs index 87450dd01d..1bff0650fc 100644 --- a/validator/src/lib.rs +++ b/validator/src/lib.rs @@ -1,10 +1,15 @@ #![allow(clippy::integer_arithmetic)] -pub use solana_core::test_validator; +pub use solana_core::{cluster_info::MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, test_validator}; use { + console::style, + indicatif::{ProgressDrawTarget, ProgressStyle}, log::*, std::{env, process::exit, thread::JoinHandle}, }; +pub mod admin_rpc_service; +pub mod dashboard; + #[cfg(unix)] fn redirect_stderr(filename: &str) { use std::{fs::OpenOptions, os::unix::io::AsRawFd}; @@ -60,14 +65,7 @@ pub fn redirect_stderr_to_file(logfile: Option) -> Option } }; - solana_logger::setup_with_default( - &[ - "solana=info,solana_runtime::message_processor=error", /* info logging for all solana modules */ - "rpc=trace", /* json_rpc request/response logging */ - ] - .join(","), - ); - + solana_logger::setup_with_default("solana=info"); logger_thread } @@ -76,3 +74,61 @@ pub fn port_validator(port: String) -> Result<(), String> { .map(|_| ()) .map_err(|e| format!("{:?}", e)) } + +pub fn port_range_validator(port_range: String) -> Result<(), String> { + if let Some((start, end)) = solana_net_utils::parse_port_range(&port_range) { + if end - start < MINIMUM_VALIDATOR_PORT_RANGE_WIDTH { + Err(format!( + "Port range is too small. 
Try --dynamic-port-range {}-{}", + start, + start + MINIMUM_VALIDATOR_PORT_RANGE_WIDTH + )) + } else { + Ok(()) + } + } else { + Err("Invalid port range".to_string()) + } +} + +/// Pretty print a "name value" +pub fn println_name_value(name: &str, value: &str) { + println!("{} {}", style(name).bold(), value); +} + +/// Creates a new process bar for processing that will take an unknown amount of time +pub fn new_spinner_progress_bar() -> ProgressBar { + let progress_bar = indicatif::ProgressBar::new(42); + progress_bar.set_draw_target(ProgressDrawTarget::stdout()); + progress_bar + .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}")); + progress_bar.enable_steady_tick(100); + + ProgressBar { + progress_bar, + is_term: console::Term::stdout().is_term(), + } +} + +pub struct ProgressBar { + progress_bar: indicatif::ProgressBar, + is_term: bool, +} + +impl ProgressBar { + pub fn set_message(&self, msg: &str) { + if self.is_term { + self.progress_bar.set_message(msg); + } else { + println!("{}", msg); + } + } + + pub fn abandon_with_message(&self, msg: &str) { + if self.is_term { + self.progress_bar.abandon_with_message(msg); + } else { + println!("{}", msg); + } + } +} diff --git a/validator/src/main.rs b/validator/src/main.rs index 657f3891d1..7976ed44d3 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1,64 +1,79 @@ #![allow(clippy::integer_arithmetic)] -use clap::{ - crate_description, crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, App, - AppSettings, Arg, ArgMatches, SubCommand, -}; -use log::*; -use rand::{seq::SliceRandom, thread_rng, Rng}; -use solana_clap_utils::{ - input_parsers::{keypair_of, keypairs_of, pubkey_of, value_of}, - input_validators::{ - is_keypair_or_ask_keyword, is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, +use { + clap::{ + crate_description, crate_name, value_t, value_t_or_exit, values_t, values_t_or_exit, App, + AppSettings, Arg, ArgMatches, SubCommand, }, - 
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, -}; -use solana_client::{rpc_client::RpcClient, rpc_request::MAX_MULTIPLE_ACCOUNTS}; -use solana_core::ledger_cleanup_service::{ - DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS, -}; -use solana_core::{ - cluster_info::{ClusterInfo, Node, MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, - contact_info::ContactInfo, - gossip_service::GossipService, - poh_service, - rpc::JsonRpcConfig, - rpc_pubsub_service::PubSubConfig, - tpu::DEFAULT_TPU_COALESCE_MS, - validator::{is_snapshot_config_invalid, Validator, ValidatorConfig}, -}; -use solana_download_utils::{download_genesis_if_missing, download_snapshot}; -use solana_ledger::blockstore_db::BlockstoreRecoveryMode; -use solana_perf::recycler::enable_recycler_warming; -use solana_runtime::{ - accounts_index::AccountIndex, - bank_forks::{ArchiveFormat, SnapshotConfig, SnapshotVersion}, - hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, - snapshot_utils::get_highest_snapshot_archive_path, -}; -use solana_sdk::{ - clock::Slot, - commitment_config::CommitmentConfig, - genesis_config::GenesisConfig, - hash::Hash, - pubkey::Pubkey, - signature::{Keypair, Signer}, -}; -use std::{ - collections::HashSet, - env, - fs::{self, File}, - net::{IpAddr, SocketAddr, TcpListener, UdpSocket}, - path::{Path, PathBuf}, - process::exit, - str::FromStr, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, + console::style, + fd_lock::FdLock, + log::*, + rand::{seq::SliceRandom, thread_rng, Rng}, + solana_clap_utils::{ + input_parsers::{keypair_of, keypairs_of, pubkey_of, value_of}, + input_validators::{ + is_keypair, is_keypair_or_ask_keyword, is_parsable, is_pubkey, is_pubkey_or_keypair, + is_slot, + }, + keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, + }, + solana_client::{ + rpc_client::RpcClient, rpc_config::RpcLeaderScheduleConfig, + rpc_request::MAX_MULTIPLE_ACCOUNTS, + }, + solana_core::ledger_cleanup_service::{ + DEFAULT_MAX_LEDGER_SHREDS, 
DEFAULT_MIN_MAX_LEDGER_SHREDS, + }, + solana_core::{ + cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}, + contact_info::ContactInfo, + gossip_service::GossipService, + poh_service, + rpc::JsonRpcConfig, + rpc_pubsub_service::PubSubConfig, + tpu::DEFAULT_TPU_COALESCE_MS, + validator::{ + is_snapshot_config_invalid, Validator, ValidatorConfig, ValidatorStartProgress, + }, + }, + solana_download_utils::{download_genesis_if_missing, download_snapshot}, + solana_ledger::blockstore_db::BlockstoreRecoveryMode, + solana_perf::recycler::enable_recycler_warming, + solana_runtime::{ + accounts_index::{ + AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, + }, + bank_forks::{ArchiveFormat, SnapshotConfig, SnapshotVersion}, + hardened_unpack::{unpack_genesis_archive, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + snapshot_utils::get_highest_snapshot_archive_path, + }, + solana_sdk::{ + clock::{Slot, DEFAULT_S_PER_SLOT}, + commitment_config::CommitmentConfig, + genesis_config::GenesisConfig, + hash::Hash, + pubkey::Pubkey, + signature::{Keypair, Signer}, + }, + std::{ + collections::{HashSet, VecDeque}, + env, + fs::{self, File}, + net::{IpAddr, SocketAddr, TcpListener, UdpSocket}, + path::{Path, PathBuf}, + process::exit, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + thread::sleep, + time::{Duration, Instant, SystemTime}, + }, + velas_validator::{ + admin_rpc_service, dashboard::Dashboard, new_spinner_progress_bar, println_name_value, + redirect_stderr_to_file, }, - thread::sleep, - time::{Duration, Instant}, }; -use velas_validator::redirect_stderr_to_file; #[derive(Debug, PartialEq)] enum Operation { @@ -66,20 +81,231 @@ enum Operation { Run, } -fn port_range_validator(port_range: String) -> Result<(), String> { - if let Some((start, end)) = solana_net_utils::parse_port_range(&port_range) { - if end - start < MINIMUM_VALIDATOR_PORT_RANGE_WIDTH { - Err(format!( - "Port range is too small. 
Try --dynamic-port-range {}-{}", - start, - start + MINIMUM_VALIDATOR_PORT_RANGE_WIDTH - )) - } else { - Ok(()) +const EXCLUDE_KEY: &str = "account-index-exclude-key"; +const INCLUDE_KEY: &str = "account-index-include-key"; + +fn monitor_validator(ledger_path: &Path) { + let dashboard = Dashboard::new(ledger_path, None, None).unwrap_or_else(|err| { + println!( + "Error: Unable to connect to validator at {}: {:?}", + ledger_path.display(), + err, + ); + exit(1); + }); + dashboard.run(Duration::from_secs(2)); +} + +fn wait_for_restart_window( + ledger_path: &Path, + min_idle_time_in_minutes: usize, +) -> Result<(), Box> { + let sleep_interval = Duration::from_secs(5); + let min_delinquency_percentage = 0.05; + + let min_idle_slots = (min_idle_time_in_minutes as f64 * 60. / DEFAULT_S_PER_SLOT) as Slot; + + let admin_client = admin_rpc_service::connect(&ledger_path); + let rpc_addr = admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.rpc_addr().await }) + .map_err(|err| format!("Unable to get validator RPC address: {}", err))?; + + let rpc_client = match rpc_addr { + None => return Err("RPC not available".into()), + Some(rpc_addr) => RpcClient::new_socket(rpc_addr), + }; + + let identity = rpc_client.get_identity()?; + println_name_value("Identity:", &identity.to_string()); + println_name_value( + "Minimum Idle Time:", + &format!( + "{} slots (~{} minutes)", + min_idle_slots, min_idle_time_in_minutes + ), + ); + + let mut current_epoch = None; + let mut leader_schedule = VecDeque::new(); + let mut restart_snapshot = None; + let mut upcoming_idle_windows = vec![]; // Vec<(starting slot, idle window length in slots)> + + let progress_bar = new_spinner_progress_bar(); + let monitor_start_time = SystemTime::now(); + loop { + let snapshot_slot = rpc_client.get_snapshot_slot().ok(); + let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::processed())?; + let healthy = rpc_client.get_health().ok().is_some(); + let 
delinquent_stake_percentage = { + let vote_accounts = rpc_client.get_vote_accounts()?; + let current_stake: u64 = vote_accounts + .current + .iter() + .map(|va| va.activated_stake) + .sum(); + let delinquent_stake: u64 = vote_accounts + .delinquent + .iter() + .map(|va| va.activated_stake) + .sum(); + let total_stake = current_stake + delinquent_stake; + delinquent_stake as f64 / total_stake as f64 + }; + + if match current_epoch { + None => true, + Some(current_epoch) => current_epoch != epoch_info.epoch, + } { + progress_bar.set_message(&format!( + "Fetching leader schedule for epoch {}...", + epoch_info.epoch + )); + let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index; + leader_schedule = rpc_client + .get_leader_schedule_with_config( + Some(first_slot_in_epoch), + RpcLeaderScheduleConfig { + identity: Some(identity.to_string()), + ..RpcLeaderScheduleConfig::default() + }, + )? + .ok_or_else(|| { + format!( + "Unable to get leader schedule from slot {}", + first_slot_in_epoch + ) + })? + .get(&identity.to_string()) + .cloned() + .unwrap_or_default() + .into_iter() + .map(|slot_index| first_slot_in_epoch.saturating_add(slot_index as u64)) + .filter(|slot| *slot > epoch_info.absolute_slot) + .collect::>(); + + upcoming_idle_windows.clear(); + { + let mut leader_schedule = leader_schedule.clone(); + let mut max_idle_window = 0; + + let mut idle_window_start_slot = epoch_info.absolute_slot; + while let Some(next_leader_slot) = leader_schedule.pop_front() { + let idle_window = next_leader_slot - idle_window_start_slot; + max_idle_window = max_idle_window.max(idle_window); + if idle_window > min_idle_slots { + upcoming_idle_windows.push((idle_window_start_slot, idle_window)); + } + idle_window_start_slot = next_leader_slot; + } + if !leader_schedule.is_empty() && upcoming_idle_windows.is_empty() { + return Err(format!( + "Validator has no idle window of at least {} slots. 
Largest idle window for epoch {} is {} slots", + min_idle_slots, epoch_info.epoch, max_idle_window + ) + .into()); + } + } + + current_epoch = Some(epoch_info.epoch); } - } else { - Err("Invalid port range".to_string()) + + let status = { + if !healthy { + style("Node is unhealthy").red().to_string() + } else { + // Wait until a hole in the leader schedule before restarting the node + let in_leader_schedule_hole = if epoch_info.slot_index + min_idle_slots as u64 + > epoch_info.slots_in_epoch + { + Err("Current epoch is almost complete".to_string()) + } else { + while leader_schedule + .get(0) + .map(|slot| *slot < epoch_info.absolute_slot) + .unwrap_or(false) + { + leader_schedule.pop_front(); + } + while upcoming_idle_windows + .get(0) + .map(|(slot, _)| *slot < epoch_info.absolute_slot) + .unwrap_or(false) + { + upcoming_idle_windows.pop(); + } + + match leader_schedule.get(0) { + None => { + Ok(()) // Validator has no leader slots + } + Some(next_leader_slot) => { + let idle_slots = + next_leader_slot.saturating_sub(epoch_info.absolute_slot); + if idle_slots >= min_idle_slots { + Ok(()) + } else { + Err(match upcoming_idle_windows.get(0) { + Some((starting_slot, length_in_slots)) => { + format!( + "Next idle window in {} slots, for {} slots", + starting_slot.saturating_sub(epoch_info.absolute_slot), + length_in_slots + ) + } + None => format!( + "Validator will be leader soon. Next leader slot is {}", + next_leader_slot + ), + }) + } + } + } + }; + + match in_leader_schedule_hole { + Ok(_) => { + if restart_snapshot == None { + restart_snapshot = snapshot_slot; + } + + if restart_snapshot == snapshot_slot { + "Waiting for a new snapshot".to_string() + } else if delinquent_stake_percentage >= min_delinquency_percentage { + style("Delinquency too high").red().to_string() + } else { + break; // Restart! 
+ } + } + Err(why) => style(why).yellow().to_string(), + } + } + }; + + progress_bar.set_message(&format!( + "{} | Processed Slot: {} | Snapshot Slot: {} | {:.2}% delinquent stake | {}", + { + let elapsed = + chrono::Duration::from_std(monitor_start_time.elapsed().unwrap()).unwrap(); + + format!( + "{:02}:{:02}:{:02}", + elapsed.num_hours(), + elapsed.num_minutes() % 60, + elapsed.num_seconds() % 60 + ) + }, + epoch_info.absolute_slot, + snapshot_slot + .map(|s| s.to_string()) + .unwrap_or_else(|| "-".to_string()), + delinquent_stake_percentage * 100., + status + )); + std::thread::sleep(sleep_interval); } + drop(progress_bar); + println!("{}", style("Ready to restart").green()); + Ok(()) } fn hash_validator(hash: String) -> Result<(), String> { @@ -575,7 +801,7 @@ fn rpc_bootstrap( ledger_path: &Path, snapshot_output_dir: &Path, vote_account: &Pubkey, - authorized_voter_keypairs: &[Arc], + authorized_voter_keypairs: Arc>>>, cluster_entrypoints: &[ContactInfo], validator_config: &mut ValidatorConfig, bootstrap_config: RpcBootstrapConfig, @@ -583,6 +809,7 @@ fn rpc_bootstrap( use_progress_bar: bool, maximum_local_snapshot_age: Slot, should_check_duplicate_instance: bool, + start_progress: &Arc>, ) { if !no_port_check { let mut order: Vec<_> = (0..cluster_entrypoints.len()).collect(); @@ -603,6 +830,8 @@ fn rpc_bootstrap( let mut gossip = None; loop { if gossip.is_none() { + *start_progress.write().unwrap() = ValidatorStartProgress::SearchingForRpcService; + gossip = Some(start_gossip_node( &identity_keypair, &cluster_entrypoints, @@ -706,6 +935,11 @@ fn rpc_bootstrap( .get_slot_with_commitment(CommitmentConfig::finalized()) .map_err(|err| format!("Failed to get RPC node slot: {}", err)) .and_then(|slot| { + *start_progress.write().unwrap() = + ValidatorStartProgress::DownloadingSnapshot { + slot: snapshot_hash.0, + rpc_addr: rpc_contact_info.rpc, + }; info!("RPC node root slot: {}", slot); let (cluster_info, gossip_exit_flag, gossip_service) = 
gossip.take().unwrap(); @@ -732,6 +966,8 @@ fn rpc_bootstrap( &identity_keypair.pubkey(), &vote_account, &authorized_voter_keypairs + .read() + .unwrap() .iter() .map(|k| k.pubkey()) .collect::>(), @@ -785,6 +1021,8 @@ pub fn main() { PubSubConfig::default().max_in_buffer_capacity.to_string(); let default_rpc_pubsub_max_out_buffer_capacity = PubSubConfig::default().max_out_buffer_capacity.to_string(); + let default_rpc_pubsub_max_active_subscriptions = + PubSubConfig::default().max_active_subscriptions.to_string(); let default_rpc_send_transaction_retry_ms = ValidatorConfig::default() .send_transaction_retry_ms .to_string(); @@ -796,6 +1034,7 @@ pub fn main() { let matches = App::new(crate_name!()).about(crate_description!()) .version(solana_version::version!()) .setting(AppSettings::VersionlessSubcommands) + .setting(AppSettings::InferSubcommands) .arg( Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) @@ -805,7 +1044,7 @@ pub fn main() { Arg::with_name("identity") .short("i") .long("identity") - .value_name("PATH") + .value_name("KEYPAIR") .takes_value(true) .validator(is_keypair_or_ask_keyword) .help("Validator identity keypair"), @@ -813,7 +1052,7 @@ pub fn main() { .arg( Arg::with_name("authorized_voter_keypairs") .long("authorized-voter") - .value_name("PATH") + .value_name("KEYPAIR") .takes_value(true) .validator(is_keypair_or_ask_keyword) .requires("vote_account") @@ -825,7 +1064,7 @@ pub fn main() { .arg( Arg::with_name("vote_account") .long("vote-account") - .value_name("PUBKEY") + .value_name("ADDRESS") .takes_value(true) .validator(is_pubkey_or_keypair) .requires("identity") @@ -914,7 +1153,13 @@ pub fn main() { .value_name("PORT") .takes_value(true) .validator(velas_validator::port_validator) - .help("Use this port for JSON RPC and the next port for the RPC websocket"), + .help("Enable JSON RPC on this port, and the next port for the RPC websocket"), + ) + .arg( + Arg::with_name("minimal_rpc_api") + 
.long("--minimal-rpc-api") + .takes_value(false) + .help("Only expose the RPC methods required to serve snapshots to other nodes"), ) .arg( Arg::with_name("private_rpc") @@ -928,20 +1173,6 @@ pub fn main() { .takes_value(false) .help("Do not perform TCP/UDP reachable port checks at start-up") ) - .arg( - Arg::with_name("enable_rpc_exit") - .long("enable-rpc-exit") - .takes_value(false) - .help("Enable the JSON RPC 'validatorExit' API. \ - Only enable in a debug environment"), - ) - .arg( - Arg::with_name("enable_rpc_set_log_filter") - .long("enable-rpc-set-log-filter") - .takes_value(false) - .help("Enable the JSON RPC 'setLogFilter' API. \ - Only enable in a debug environment"), - ) .arg( Arg::with_name("enable_rpc_transaction_history") .long("enable-rpc-transaction-history") @@ -1023,7 +1254,14 @@ pub fn main() { .long("snapshots") .value_name("DIR") .takes_value(true) - .help("Use DIR as persistent snapshot [default: --ledger value]"), + .help("Use DIR as snapshot location [default: --ledger value]"), + ) + .arg( + Arg::with_name("tower") + .long("tower") + .value_name("DIR") + .takes_value(true) + .help("Use DIR as tower location [default: --ledger value]"), ) .arg( Arg::with_name("gossip_port") @@ -1059,7 +1297,7 @@ pub fn main() { .value_name("MIN_PORT-MAX_PORT") .takes_value(true) .default_value(default_dynamic_port_range) - .validator(port_range_validator) + .validator(velas_validator::port_range_validator) .help("Range to use for dynamically assigned ports"), ) .arg( @@ -1182,6 +1420,14 @@ pub fn main() { .help("After processing the ledger and the next slot is SLOT, wait until a \ supermajority of stake is visible on gossip before starting PoH"), ) + .arg( + Arg::with_name("no_wait_for_vote_to_start_leader") + .hidden(true) + .long("no-wait-for-vote-to-start-leader") + .help("If the validator starts up with no ledger, it will wait to start block + production until it sees a vote land in a rooted slot. This prevents + double signing. 
Turn off to risk double signing a block."), + ) .arg( Arg::with_name("hard_forks") .long("hard-fork") @@ -1195,7 +1441,7 @@ pub fn main() { Arg::with_name("trusted_validators") .long("trusted-validator") .validator(is_pubkey) - .value_name("PUBKEY") + .value_name("VALIDATOR IDENTITY") .multiple(true) .takes_value(true) .help("A snapshot hash must be published in gossip by this validator to be accepted. \ @@ -1205,7 +1451,7 @@ pub fn main() { Arg::with_name("debug_key") .long("debug-key") .validator(is_pubkey) - .value_name("PUBKEY") + .value_name("ADDRESS") .multiple(true) .takes_value(true) .help("Log when transactions are processed which reference a given key."), @@ -1220,7 +1466,7 @@ pub fn main() { Arg::with_name("repair_validators") .long("repair-validator") .validator(is_pubkey) - .value_name("PUBKEY") + .value_name("VALIDATOR IDENTITY") .multiple(true) .takes_value(true) .help("A list of validators to request repairs from. If specified, repair will not \ @@ -1230,7 +1476,7 @@ pub fn main() { Arg::with_name("gossip_validators") .long("gossip-validator") .validator(is_pubkey) - .value_name("PUBKEY") + .value_name("VALIDATOR IDENTITY") .multiple(true) .takes_value(true) .help("A list of validators to gossip with. If specified, gossip \ @@ -1241,7 +1487,29 @@ pub fn main() { Arg::with_name("no_rocksdb_compaction") .long("no-rocksdb-compaction") .takes_value(false) - .help("Disable manual compaction of the ledger database. 
May increase storage requirements.") + .help("Disable manual compaction of the ledger database (this is ignored).") + ) + .arg( + Arg::with_name("rocksdb_compaction_interval") + .long("rocksdb-compaction-interval-slots") + .value_name("ROCKSDB_COMPACTION_INTERVAL_SLOTS") + .takes_value(true) + .help("Number of slots between compacting ledger"), + ) + .arg( + Arg::with_name("tpu_coalesce_ms") + .long("tpu-coalesce-ms") + .value_name("MILLISECS") + .takes_value(true) + .validator(is_parsable::) + .help("Milliseconds to wait in the TPU receiver for packet coalescing."), + ) + .arg( + Arg::with_name("rocksdb_max_compaction_jitter") + .long("rocksdb-max-compaction-jitter-slots") + .value_name("ROCKSDB_MAX_COMPACTION_JITTER_SLOTS") + .takes_value(true) + .help("Introduce jitter into the compaction to offset compaction operation"), ) .arg( Arg::with_name("rocksdb_compaction_interval") @@ -1345,6 +1613,16 @@ pub fn main() { .default_value(&default_rpc_pubsub_max_out_buffer_capacity) .help("The maximum size in bytes to which the outgoing websocket buffer can grow."), ) + .arg( + Arg::with_name("rpc_pubsub_max_active_subscriptions") + .long("rpc-pubsub-max-active-subscriptions") + .takes_value(true) + .value_name("NUMBER") + .validator(is_parsable::) + .default_value(&default_rpc_pubsub_max_active_subscriptions) + .help("The maximum number of active subscriptions that RPC PubSub will accept \ + across all connections."), + ) .arg( Arg::with_name("rpc_send_transaction_retry_ms") .long("rpc-send-retry-ms") @@ -1363,6 +1641,13 @@ pub fn main() { .default_value(&default_rpc_send_transaction_leader_forward_count) .help("The number of upcoming leaders to which to forward transactions sent via rpc service."), ) + .arg( + Arg::with_name("rpc_scan_and_fix_roots") + .long("rpc-scan-and-fix-roots") + .takes_value(false) + .requires("enable_rpc_transaction_history") + .help("Verifies blockstore roots on boot and fixes any gaps"), + ) .arg( 
Arg::with_name("halt_on_trusted_validators_accounts_hash_mismatch") .long("halt-on-trusted-validators-accounts-hash-mismatch") @@ -1416,10 +1701,200 @@ pub fn main() { ), ) .arg( + Arg::with_name("no_bpf_jit") + .long("no-bpf-jit") + .takes_value(false) + .help("Disable the just-in-time compiler and instead use the interpreter for BPF"), + ) + .arg( + // legacy nop argument Arg::with_name("bpf_jit") .long("bpf-jit") + .hidden(true) + .takes_value(false) + .conflicts_with("no_bpf_jit") + ) + .arg( + Arg::with_name("poh_pinned_cpu_core") + .hidden(true) + .long("experimental-poh-pinned-cpu-core") + .takes_value(true) + .value_name("CPU_CORE_INDEX") + .validator(|s| { + let core_index = usize::from_str(&s).map_err(|e| e.to_string())?; + let max_index = core_affinity::get_core_ids().map(|cids| cids.len() - 1).unwrap_or(0); + if core_index > max_index { + return Err(format!("core index must be in the range [0, {}]", max_index)); + } + Ok(()) + }) + .help("EXPERIMENTAL: Specify which CPU core PoH is pinned to"), + ) + .arg( + Arg::with_name("poh_hashes_per_batch") + .hidden(true) + .long("poh-hashes-per-batch") + .takes_value(true) + .value_name("NUM") + .help("Specify hashes per batch in PoH service"), + ) + .arg( + Arg::with_name("account_indexes") + .long("account-index") + .takes_value(true) + .multiple(true) + .possible_values(&["program-id", "spl-token-owner", "spl-token-mint"]) + .value_name("INDEX") + .help("Enable an accounts index, indexed by the selected account field"), + ) + .arg( + Arg::with_name("account_index_exclude_key") + .long(EXCLUDE_KEY) + .takes_value(true) + .validator(is_pubkey) + .multiple(true) + .value_name("KEY") + .help("When account indexes are enabled, exclude this key from the index."), + ) + .arg( + Arg::with_name("account_index_include_key") + .long(INCLUDE_KEY) + .takes_value(true) + .validator(is_pubkey) + .conflicts_with("account_index_exclude_key") + .multiple(true) + .value_name("KEY") + .help("When account indexes are enabled, 
only include specific keys in the index. This overrides --account-index-exclude-key."), + ) + .arg( + Arg::with_name("no_accounts_db_caching") + .long("no-accounts-db-caching") + .help("Disables accounts caching"), + ) + .arg( + Arg::with_name("accounts_db_test_hash_calculation") + .long("accounts-db-test-hash-calculation") + .help("Enables testing of hash calculation using stores in \ + AccountsHashVerifier. This has a computational cost."), + ) + .arg( + Arg::with_name("accounts_db_index_hashing") + .long("accounts-db-index-hashing") + .help("Enables the use of the index in hash calculation in \ + AccountsHashVerifier/Accounts Background Service."), + ) + .arg( + Arg::with_name("no_accounts_db_index_hashing") + .long("no-accounts-db-index-hashing") + .help("This is obsolete. See --accounts-db-index-hashing. \ + Disables the use of the index in hash calculation in \ + AccountsHashVerifier/Accounts Background Service."), + ) + .arg( + // legacy nop argument + Arg::with_name("accounts_db_caching_enabled") + .long("accounts-db-caching-enabled") + .conflicts_with("no_accounts_db_caching") + .hidden(true) + ) + .arg( + Arg::with_name("no_duplicate_instance_check") + .long("no-duplicate-instance-check") .takes_value(false) - .help("Use the just-in-time compiler instead of the interpreter for BPF."), + .help("Disables duplicate instance check") + .hidden(true), + ) + .after_help("The default subcommand is run") + .subcommand( + SubCommand::with_name("exit") + .about("Send an exit request to the validator") + .arg( + Arg::with_name("force") + .short("f") + .long("force") + .takes_value(false) + .help("Request the validator exit immediately instead of waiting for a restart window") + ) + .arg( + Arg::with_name("monitor") + .short("m") + .long("monitor") + .takes_value(false) + .help("Monitor the validator after sending the exit request") + ) + .arg( + Arg::with_name("min_idle_time") + .takes_value(true) + .long("min-idle-time") + .value_name("MINUTES") + 
.validator(is_parsable::) + .default_value("10") + .help("Minimum time that the validator should not be leader before restarting") + ) + ) + .subcommand( + SubCommand::with_name("authorized-voter") + .about("Adjust the validator authorized voters") + .setting(AppSettings::SubcommandRequiredElseHelp) + .setting(AppSettings::InferSubcommands) + .subcommand( + SubCommand::with_name("add") + .about("Add an authorized voter") + .arg( + Arg::with_name("authorized_voter_keypair") + .index(1) + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_keypair) + .help("Keypair of the authorized voter to add"), + ) + .after_help("Note: the new authorized voter only applies to the \ + currently running validator instance") + ) + .subcommand( + SubCommand::with_name("remove-all") + .about("Remove all authorized voters") + .after_help("Note: the removal only applies to the \ + currently running validator instance") + ) + ) + .subcommand( + SubCommand::with_name("init") + .about("Initialize the ledger directory then exit") + ) + .subcommand( + SubCommand::with_name("monitor") + .about("Monitor the validator") + ) + .subcommand( + SubCommand::with_name("run") + .about("Run the validator") + ) + .subcommand( + SubCommand::with_name("set-log-filter") + .about("Adjust the validator log filter") + .arg( + Arg::with_name("filter") + .takes_value(true) + .index(1) + .help("New filter using the same format as the RUST_LOG environment variable") + ) + .after_help("Note: the new filter only applies to the currently running validator instance") + ) + .subcommand( + SubCommand::with_name("wait-for-restart-window") + .about("Monitor the validator for a good time to restart") + .arg( + Arg::with_name("min_idle_time") + .takes_value(true) + .index(1) + .validator(is_parsable::) + .value_name("MINUTES") + .default_value("10") + .help("Minimum time that the validator should not be leader before restarting") + ) + .after_help("Note: If this command exits with a non-zero status \ + then this 
not a good time for a restart") ) .arg( Arg::with_name("poh_pinned_cpu_core") @@ -1501,19 +1976,127 @@ pub fn main() { ) .get_matches(); - let operation = match matches.subcommand().0 { - "" | "run" => Operation::Run, - "init" => Operation::Initialize, + let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap()); + + let operation = match matches.subcommand() { + ("", _) | ("run", _) => Operation::Run, + ("authorized-voter", Some(authorized_voter_subcommand_matches)) => { + match authorized_voter_subcommand_matches.subcommand() { + ("add", Some(subcommand_matches)) => { + let authorized_voter_keypair = + value_t_or_exit!(subcommand_matches, "authorized_voter_keypair", String); + + let authorized_voter_keypair = fs::canonicalize(&authorized_voter_keypair) + .unwrap_or_else(|err| { + println!( + "Unable to access path: {}: {:?}", + authorized_voter_keypair, err + ); + exit(1); + }); + println!( + "Adding authorized voter: {}", + authorized_voter_keypair.display() + ); + + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client + .await? 
+ .add_authorized_voter( + authorized_voter_keypair.display().to_string(), + ) + .await + }) + .unwrap_or_else(|err| { + println!("addAuthorizedVoter request failed: {}", err); + exit(1); + }); + return; + } + ("remove-all", _) => { + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client.await?.remove_all_authorized_voters().await + }) + .unwrap_or_else(|err| { + println!("removeAllAuthorizedVoters request failed: {}", err); + exit(1); + }); + println!("All authorized voters removed"); + return; + } + _ => unreachable!(), + } + } + ("init", _) => Operation::Initialize, + ("exit", Some(subcommand_matches)) => { + let min_idle_time = value_t_or_exit!(subcommand_matches, "min_idle_time", usize); + let force = subcommand_matches.is_present("force"); + let monitor = subcommand_matches.is_present("monitor"); + + if !force { + wait_for_restart_window(&ledger_path, min_idle_time).unwrap_or_else(|err| { + println!("{}", err); + exit(1); + }); + } + + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.exit().await }) + .unwrap_or_else(|err| { + println!("exit request failed: {}", err); + exit(1); + }); + println!("Exit request sent"); + + if monitor { + monitor_validator(&ledger_path); + } + return; + } + ("monitor", _) => { + monitor_validator(&ledger_path); + return; + } + ("set-log-filter", Some(subcommand_matches)) => { + let filter = value_t_or_exit!(subcommand_matches, "filter", String); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.set_log_filter(filter).await }) + .unwrap_or_else(|err| { + println!("set log filter failed: {}", err); + exit(1); + }); + return; + } + ("wait-for-restart-window", Some(subcommand_matches)) => { + let min_idle_time = value_t_or_exit!(subcommand_matches, "min_idle_time", usize); + 
wait_for_restart_window(&ledger_path, min_idle_time).unwrap_or_else(|err| { + println!("{}", err); + exit(1); + }); + return; + } _ => unreachable!(), }; - let identity_keypair = Arc::new(keypair_of(&matches, "identity").unwrap_or_else(Keypair::new)); + let identity_keypair = Arc::new(keypair_of(&matches, "identity").unwrap_or_else(|| { + clap::Error::with_description( + "The --identity argument is required", + clap::ErrorKind::ArgumentNotFound, + ) + .exit(); + })); let authorized_voter_keypairs = keypairs_of(&matches, "authorized_voter_keypairs") .map(|keypairs| keypairs.into_iter().map(Arc::new).collect()) .unwrap_or_else(|| vec![identity_keypair.clone()]); + let authorized_voter_keypairs = Arc::new(RwLock::new(authorized_voter_keypairs)); - let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap()); let init_complete_file = matches.value_of("init_complete_file"); let rpc_bootstrap_config = RpcBootstrapConfig { @@ -1587,23 +2170,12 @@ pub fn main() { let contact_debug_interval = value_t_or_exit!(matches, "contact_debug_interval", u64); - let account_indexes: HashSet = matches - .values_of("account_indexes") - .unwrap_or_default() - .map(|value| match value { - "program-id" => AccountIndex::ProgramId, - "spl-token-mint" => AccountIndex::SplTokenMint, - "spl-token-owner" => AccountIndex::SplTokenOwner, - "velas-accounts-storages" => AccountIndex::VelasAccountStorage, - "velas-accounts-owners" => AccountIndex::VelasAccountOwner, - "velas-accounts-operationals" => AccountIndex::VelasAccountOperational, - unexpected => panic!("Unable to handle 'account_indexes' flag {}", unexpected), - }) - .collect(); + let account_indexes = process_account_indexes(&matches); let restricted_repair_only_mode = matches.is_present("restricted_repair_only_mode"); let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), + tower_path: value_t!(matches, "tower", PathBuf).ok(), dev_halt_at_slot: value_t!(matches, "dev_halt_at_slot", 
Slot).ok(), cuda: matches.is_present("cuda"), expected_genesis_hash: matches @@ -1615,8 +2187,6 @@ pub fn main() { expected_shred_version: value_t!(matches, "expected_shred_version", u16).ok(), new_hard_forks: hardforks_of(&matches, "hard_forks"), rpc_config: JsonRpcConfig { - enable_validator_exit: matches.is_present("enable_rpc_exit"), - enable_set_log_filter: matches.is_present("enable_rpc_set_log_filter"), enable_rpc_transaction_history: matches.is_present("enable_rpc_transaction_history"), enable_cpi_and_log_storage: matches.is_present("enable_cpi_and_log_storage"), enable_bigtable_ledger_storage: matches @@ -1626,6 +2196,7 @@ pub fn main() { faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| { solana_net_utils::parse_host_port(address).expect("failed to parse faucet address") }), + minimal_api: matches.is_present("minimal_rpc_api"), max_multiple_accounts: Some(value_t_or_exit!( matches, "rpc_max_multiple_accounts", @@ -1641,6 +2212,7 @@ pub fn main() { .ok() .map(Duration::from_secs), account_indexes: account_indexes.clone(), + rpc_scan_and_fix_roots: matches.is_present("rpc_scan_and_fix_roots"), }, rpc_addrs: value_t!(matches, "rpc_port", u16).ok().map(|rpc_port| { ( @@ -1665,6 +2237,11 @@ pub fn main() { "rpc_pubsub_max_out_buffer_capacity", usize ), + max_active_subscriptions: value_t_or_exit!( + matches, + "rpc_pubsub_max_active_subscriptions", + usize + ), }, voting_disabled: matches.is_present("no_voting") || restricted_repair_only_mode, wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(), @@ -1679,7 +2256,7 @@ pub fn main() { poh_verify: !matches.is_present("skip_poh_verify"), debug_keys, contact_debug_interval, - bpf_jit: matches.is_present("bpf_jit"), + bpf_jit: !matches.is_present("no_bpf_jit"), send_transaction_retry_ms: value_t_or_exit!(matches, "rpc_send_transaction_retry_ms", u64), send_transaction_leader_forward_count: value_t_or_exit!( matches, @@ -1694,8 +2271,9 @@ pub fn main() { account_indexes, 
accounts_db_caching_enabled: !matches.is_present("no_accounts_db_caching"), accounts_db_test_hash_calculation: matches.is_present("accounts_db_test_hash_calculation"), - accounts_db_use_index_hash_calculation: !matches.is_present("no_accounts_db_index_hashing"), + accounts_db_use_index_hash_calculation: matches.is_present("accounts_db_index_hashing"), tpu_coalesce_ms, + no_wait_for_vote_to_start_leader: matches.is_present("no_wait_for_vote_to_start_leader"), ..ValidatorConfig::default() }; @@ -1867,6 +2445,15 @@ pub fn main() { }) }); + let mut ledger_fd_lock = FdLock::new(fs::File::open(&ledger_path).unwrap()); + let _ledger_lock = ledger_fd_lock.try_lock().unwrap_or_else(|_| { + println!( + "Error: Unable to lock {} directory. Check if another validator is running", + ledger_path.display() + ); + exit(1); + }); + let logfile = { let logfile = matches .value_of("logfile") @@ -1886,6 +2473,18 @@ pub fn main() { info!("{} {}", crate_name!(), solana_version::version!()); info!("Starting validator with: {:#?}", std::env::args_os()); + let start_progress = Arc::new(RwLock::new(ValidatorStartProgress::default())); + admin_rpc_service::run( + &ledger_path, + admin_rpc_service::AdminRpcRequestMetadata { + rpc_addr: validator_config.rpc_addrs.map(|(rpc_addr, _)| rpc_addr), + start_time: std::time::SystemTime::now(), + validator_exit: validator_config.validator_exit.clone(), + start_progress: start_progress.clone(), + authorized_voter_keypairs: authorized_voter_keypairs.clone(), + }, + ); + let gossip_host: IpAddr = matches .value_of("gossip_host") .map(|gossip_host| { @@ -1993,7 +2592,7 @@ pub fn main() { &ledger_path, &snapshot_output_dir, &vote_account, - &authorized_voter_keypairs, + authorized_voter_keypairs.clone(), &cluster_entrypoints, &mut validator_config, rpc_bootstrap_config, @@ -2001,13 +2600,16 @@ pub fn main() { use_progress_bar, maximum_local_snapshot_age, should_check_duplicate_instance, + &start_progress, ); + *start_progress.write().unwrap() = 
ValidatorStartProgress::Initializing; } if operation == Operation::Initialize { info!("Validator ledger initialization complete"); return; } + let validator = Validator::new( node, &identity_keypair, @@ -2017,6 +2619,7 @@ pub fn main() { cluster_entrypoints, &validator_config, should_check_duplicate_instance, + start_progress, ); if let Some(filename) = init_complete_file { @@ -2029,3 +2632,52 @@ pub fn main() { validator.join(); info!("Validator exiting.."); } + +fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { + let account_indexes: HashSet = matches + .values_of("account_indexes") + .unwrap_or_default() + .map(|value| match value { + "program-id" => AccountIndex::ProgramId, + "spl-token-mint" => AccountIndex::SplTokenMint, + "spl-token-owner" => AccountIndex::SplTokenOwner, + _ => unreachable!(), + }) + .collect(); + + let account_indexes_include_keys: HashSet = + values_t!(matches, "account_index_include_key", Pubkey) + .unwrap_or_default() + .iter() + .cloned() + .collect(); + + let account_indexes_exclude_keys: HashSet = + values_t!(matches, "account_index_exclude_key", Pubkey) + .unwrap_or_default() + .iter() + .cloned() + .collect(); + + let exclude_keys = !account_indexes_exclude_keys.is_empty(); + let include_keys = !account_indexes_include_keys.is_empty(); + + let keys = if !account_indexes.is_empty() && (exclude_keys || include_keys) { + let account_indexes_keys = AccountSecondaryIndexesIncludeExclude { + exclude: exclude_keys, + keys: if exclude_keys { + account_indexes_exclude_keys + } else { + account_indexes_include_keys + }, + }; + Some(account_indexes_keys) + } else { + None + }; + + AccountSecondaryIndexes { + keys, + indexes: account_indexes, + } +} diff --git a/version/Cargo.toml b/version/Cargo.toml index abedfb6102..34cad06922 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-version" -version = "0.3.6" +version = "1.6.14" # TODO: velas binaries version description = 
"Solana Version" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,12 +11,12 @@ edition = "2018" [dependencies] log = "0.4.11" -serde = "1.0.118" +serde = "1.0.122" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../frozen-abi", version = "=1.5.19" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.6.14" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } [lib] name = "solana_version" diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index 2e9efbbc27..aa8c45333c 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2018" name = "solana-watchtower" description = "Blockchain, Rebuilt for Scale" -version = "1.5.19" +version = "1.6.14" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,15 +13,15 @@ documentation = "https://docs.rs/solana-watchtower" clap = "2.33.1" log = "0.4.11" humantime = "2.0.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.5.19" } -solana-cli-config = { path = "../cli-config", version = "=1.5.19" } -solana-cli-output = { path = "../cli-output", version = "=1.5.19" } -solana-client = { path = "../client", version = "=1.5.19" } -solana-logger = { path = "../logger", version = "=1.5.19" } -solana-metrics = { path = "../metrics", version = "=1.5.19" } -solana-notifier = { path = "../notifier", version = "=1.5.19" } -solana-sdk = { path = "../sdk", version = "=1.5.19" } -solana-version = { path = "../version" } +solana-clap-utils = { path = "../clap-utils", version = "=1.6.14" } 
+solana-cli-config = { path = "../cli-config", version = "=1.6.14" } +solana-cli-output = { path = "../cli-output", version = "=1.6.14" } +solana-client = { path = "../client", version = "=1.6.14" } +solana-logger = { path = "../logger", version = "=1.6.14" } +solana-metrics = { path = "../metrics", version = "=1.6.14" } +solana-notifier = { path = "../notifier", version = "=1.6.14" } +solana-sdk = { path = "../sdk", version = "=1.6.14" } +solana-version = { path = "../version", version = "=1.6.14" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"]