diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ef3bbb57bb3f2..666e2c7cfa24b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ Release channels have their own copy of this changelog: * CLI: * Add global `--skip-preflight` option for skipping preflight checks on all transactions sent through RPC. This flag, along with `--use-rpc`, can improve success rate with program deployments using the public RPC nodes. * Unhide `--accounts-db-access-storages-method` for agave-validator and agave-ledger-tool + * Remove tracer stats from banking-trace. The `banking-trace` directory should be cleared when restarting on v2.2 for the first time. Nothing will break if it is not cleared, but the trace file will contain a mix of the new and old formats. (#4043) ## [2.1.0] * Breaking: diff --git a/Cargo.lock b/Cargo.lock index 731478914764d2..febc4748f09899 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -114,7 +114,7 @@ dependencies = [ "log", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -135,7 +135,7 @@ dependencies = [ "nix", "reqwest", "scopeguard", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_derive", "serde_yaml 0.8.26", @@ -173,6 +173,7 @@ dependencies = [ "rayon", "regex", "serde", + "serde_bytes", "serde_derive", "serde_json", "signal-hook", @@ -196,6 +197,7 @@ dependencies = [ "solana-rpc", "solana-runtime", "solana-runtime-transaction", + "solana-sbpf", "solana-sdk", "solana-stake-program", "solana-storage-bigtable", @@ -205,8 +207,7 @@ dependencies = [ "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", "tikv-jemallocator", "tokio", ] @@ -309,7 +310,7 @@ dependencies = [ "spl-token-2022", "symlink", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tikv-jemallocator", "tokio", ] @@ -436,9 +437,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "aquamarine" @@ -740,7 +741,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -893,23 +894,23 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "bit-set" -version = "0.5.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" @@ -1043,7 +1044,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1159,22 +1160,22 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1252,7 +1253,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_json", "thiserror 1.0.69", @@ -1325,7 +1326,7 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1513,15 +1514,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width 0.1.9", - "windows-sys 0.52.0", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", ] [[package]] @@ -1673,9 +1674,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] @@ -1818,7 +1819,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1842,7 +1843,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1853,7 +1854,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1915,7 +1916,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1926,7 +1927,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2050,7 +2051,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2126,9 +2127,9 @@ checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" @@ -2156,7 +2157,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 
2.0.91", ] [[package]] @@ -2468,7 +2469,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2891,9 +2892,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -3098,7 +3099,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3491,9 +3492,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -3954,7 +3955,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3996,7 +3997,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -4027,7 +4027,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4555,9 +4555,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", "bit-vec", @@ -4661,7 +4661,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4696,9 +4696,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -4714,11 +4714,11 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.6", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -5162,7 +5162,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.24", ] [[package]] @@ -5201,9 +5201,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.3", @@ -5265,7 +5265,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.19", + "rustls 
0.23.20", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -5357,12 +5357,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "scroll" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" - [[package]] name = "sct" version = "0.7.0" @@ -5408,9 +5402,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" dependencies = [ "serde", ] @@ -5435,9 +5429,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -5462,13 +5456,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -5524,7 +5518,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -5574,7 +5568,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -5854,7 +5848,7 @@ dependencies = [ "spl-token-2022", "spl-token-group-interface", "spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -5987,7 +5981,7 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6004,7 +5998,7 @@ dependencies = [ "solana-program", "solana-program-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6062,7 +6056,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "tarpc", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-serde", ] @@ -6152,7 +6146,23 @@ dependencies = [ "solana-version", "spl-instruction-padding", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", +] + +[[package]] +name = "solana-bench-vote" +version = "2.2.0" +dependencies = [ + "bincode", + "clap 3.2.23", + "crossbeam-channel", + "solana-client", + "solana-connection-cache", + "solana-net-utils", + "solana-sdk", + "solana-streamer", + "solana-version", + "solana-vote-program", ] [[package]] @@ -6200,7 +6210,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6232,12 +6242,12 @@ dependencies = [ "solana-poseidon", "solana-program-memory", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-timings", "solana-type-overrides", - "solana_rbpf", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6312,6 +6322,7 @@ dependencies = [ "solana-stake-program", "solana-system-program", "solana-vote-program", + 
"static_assertions", ] [[package]] @@ -6327,7 +6338,7 @@ dependencies = [ "predicates", "regex", "reqwest", - "semver 1.0.23", + "semver 1.0.24", "serial_test", "solana-file-download", "solana-keypair", @@ -6370,7 +6381,7 @@ dependencies = [ "solana-signature", "solana-signer", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "uriparse", "url 2.5.4", @@ -6401,7 +6412,7 @@ dependencies = [ "solana-signer", "solana-zk-token-sdk", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "uriparse", "url 2.5.4", @@ -6426,7 +6437,7 @@ dependencies = [ "num-traits", "pretty-hex", "reqwest", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_derive", "serde_json", @@ -6452,6 +6463,7 @@ dependencies = [ "solana-rpc-client", "solana-rpc-client-api", "solana-rpc-client-nonce-utils", + "solana-sbpf", "solana-sdk", "solana-streamer", "solana-test-validator", @@ -6461,11 +6473,10 @@ dependencies = [ "solana-udp-client", "solana-version", "solana-vote-program", - "solana_rbpf", "spl-memo", "tempfile", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", ] @@ -6497,7 +6508,7 @@ dependencies = [ "humantime", "indicatif", "pretty-hex", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_json", "solana-account-decoder", @@ -6552,7 +6563,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-udp-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6659,7 +6670,7 @@ dependencies = [ "solana-pubkey", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6690,8 +6701,11 @@ dependencies = [ "criterion", "solana-compute-budget", "solana-compute-budget-instruction", + "solana-compute-budget-interface", "solana-compute-budget-program", - "solana-sdk", + "solana-feature-set", + "solana-message", + "solana-sdk-ids", "solana-svm-transaction", ] @@ -6731,7 +6745,7 @@ dependencies = [ "solana-net-utils", "solana-time-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6767,7 +6781,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.19", + "rustls 0.23.20", "serde", "serde_bytes", "serde_derive", @@ -6821,6 +6835,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-turbine", + "solana-unified-scheduler-logic", "solana-unified-scheduler-pool", "solana-version", "solana-vote", @@ -6835,7 +6850,7 @@ dependencies = [ "systemstat", "tempfile", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "trees", ] @@ -6892,7 +6907,7 @@ dependencies = [ "bytemuck_derive", "curve25519-dalek 4.1.3", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7101,7 +7116,7 @@ dependencies = [ "solana-transaction", "solana-version", "spl-memo", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -7196,7 +7211,7 @@ dependencies = [ "sha2 0.10.8", "solana-frozen-abi-macro", "solana-logger", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7205,7 +7220,7 @@ version = "2.2.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -7267,7 +7282,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -7294,9 +7309,11 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "serde", + "serde-big-array", "serde_bytes", "serde_derive", "serial_test", + "siphasher", "solana-bloom", "solana-clap-utils", "solana-client", @@ -7325,7 +7342,7 @@ dependencies = [ 
"solana-vote-program", "static_assertions", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7566,7 +7583,7 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "trees", @@ -7583,9 +7600,9 @@ dependencies = [ "solana-log-collector", "solana-measure", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-type-overrides", - "solana_rbpf", ] [[package]] @@ -7742,7 +7759,7 @@ dependencies = [ "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7861,7 +7878,7 @@ version = "2.2.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "toml 0.8.12", ] @@ -7944,7 +7961,7 @@ dependencies = [ "solana-poh", "solana-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7982,7 +7999,7 @@ dependencies = [ "ark-bn254", "light-poseidon", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8091,7 +8108,7 @@ dependencies = [ "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "thiserror 2.0.6", + "thiserror 2.0.9", "wasm-bindgen", ] @@ -8147,7 +8164,6 @@ dependencies = [ "bincode", "enum-iterator", "itertools 0.12.1", - "libc", "log", "num-derive", "num-traits", @@ -8166,12 +8182,12 @@ dependencies = [ "solana-instruction", "solana-last-restart-slot", "solana-log-collector", - "solana-logger", "solana-measure", "solana-metrics", "solana-precompiles", "solana-pubkey", "solana-rent", + "solana-sbpf", "solana-sdk-ids", "solana-slot-hashes", "solana-stable-layout", @@ -8180,9 +8196,8 @@ dependencies = [ "solana-timings", "solana-transaction-context", "solana-type-overrides", - "solana_rbpf", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8210,14 +8225,14 @@ dependencies = [ "solana-logger", "solana-program-runtime", "solana-runtime", + "solana-sbpf", "solana-sdk", "solana-stake-program", "solana-svm", "solana-timings", "solana-vote-program", - "solana_rbpf", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8263,7 +8278,7 @@ dependencies = [ "futures-util", "log", "reqwest", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_derive", "serde_json", @@ -8273,7 +8288,7 @@ dependencies = [ "solana-pubkey", "solana-rpc-client-api", "solana-signature", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-tungstenite", @@ -8294,7 +8309,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.19", + "rustls 0.23.20", "solana-connection-cache", "solana-keypair", "solana-logger", @@ -8310,7 +8325,7 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8342,13 +8357,13 @@ dependencies = [ "num-traits", "parking_lot 0.12.3", "qstring", - "semver 1.0.23", + "semver 1.0.24", "solana-derivation-path", "solana-offchain-message", "solana-pubkey", "solana-signature", "solana-signer", - "thiserror 2.0.6", + "thiserror 2.0.9", "uriparse", ] @@ -8457,7 +8472,7 @@ dependencies = [ "spl-token-2022", "stream-cancel", "symlink", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.13", ] @@ -8479,7 +8494,7 @@ dependencies = [ "log", "reqwest", "reqwest-middleware", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_derive", "serde_json", @@ -8520,7 +8535,7 @@ dependencies = [ "jsonrpc-core", "reqwest", "reqwest-middleware", - "semver 1.0.23", + "semver 1.0.24", "serde", 
"serde_derive", "serde_json", @@ -8536,7 +8551,7 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "solana-version", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8564,7 +8579,7 @@ dependencies = [ "solana-signer", "solana-system-interface", "solana-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8688,7 +8703,7 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -8708,13 +8723,32 @@ dependencies = [ "solana-sdk", "solana-sdk-ids", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] name = "solana-sanitize" version = "2.2.0" +[[package]] +name = "solana-sbpf" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b4c060a707fdb0754a876cbbf49591b60a573b5521b485125d2a4d6ff68ce3" +dependencies = [ + "byteorder", + "combine 3.8.1", + "gdbstub", + "hash32", + "libc", + "log", + "rand 0.8.5", + "rustc-demangle", + "shuttle", + "thiserror 1.0.69", + "winapi 0.3.9", +] + [[package]] name = "solana-sdk" version = "2.2.0" @@ -8801,6 +8835,7 @@ dependencies = [ "solana-serde", "solana-serde-varint", "solana-short-vec", + "solana-shred-version", "solana-signature", "solana-signer", "solana-system-transaction", @@ -8808,8 +8843,9 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", + "solana-validator-exit", "static_assertions", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "wasm-bindgen", ] @@ -8828,7 +8864,7 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -8872,7 +8908,7 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-program", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8986,6 +9022,16 @@ dependencies = [ "solana-frozen-abi-macro", ] +[[package]] +name = "solana-shred-version" +version = "2.2.0" +dependencies = [ + "byteorder", + "solana-hard-forks", + "solana-hash", + "solana-sha256-hasher", +] + [[package]] name = "solana-signature" version = "2.2.0" @@ -9070,6 +9116,7 @@ version = "2.2.0" dependencies = [ "assert_matches", "bincode", + "criterion", "log", "proptest", "solana-compute-budget", @@ -9123,7 +9170,7 @@ dependencies = [ "solana-sdk", "solana-storage-proto", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tonic", "zstd", @@ -9168,7 +9215,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.19", + "rustls 0.23.20", "smallvec", "socket2 0.5.8", "solana-keypair", @@ -9188,7 +9235,7 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "solana-transaction-metrics-tracker", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.13", "x509-parser", @@ -9214,32 +9261,60 @@ dependencies = [ "serde", "serde_derive", "shuttle", + "solana-account", "solana-bpf-loader-program", + "solana-clock", "solana-compute-budget", "solana-compute-budget-instruction", + "solana-compute-budget-interface", "solana-compute-budget-program", "solana-ed25519-program", + "solana-epoch-schedule", "solana-feature-set", "solana-fee", + "solana-fee-calculator", + "solana-fee-structure", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-hash", + "solana-instruction", + "solana-instructions-sysvar", + "solana-keypair", "solana-loader-v4-program", "solana-log-collector", "solana-logger", "solana-measure", + "solana-message", + "solana-native-token", + "solana-nonce", + 
"solana-precompiles", + "solana-program", "solana-program-runtime", + "solana-pubkey", + "solana-rent", + "solana-rent-debits", + "solana-reserved-account-keys", + "solana-sbpf", "solana-sdk", + "solana-sdk-ids", + "solana-secp256k1-program", "solana-secp256r1-program", + "solana-signature", + "solana-signer", "solana-svm", "solana-svm-conformance", "solana-svm-rent-collector", "solana-svm-transaction", "solana-system-program", + "solana-system-transaction", + "solana-sysvar", "solana-timings", + "solana-transaction", + "solana-transaction-context", + "solana-transaction-error", "solana-type-overrides", - "solana_rbpf", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9462,7 +9537,7 @@ dependencies = [ name = "solana-tls-utils" version = "2.2.0" dependencies = [ - "rustls 0.23.19", + "rustls 0.23.20", "solana-keypair", "solana-pubkey", "solana-signer", @@ -9500,7 +9575,7 @@ dependencies = [ "spl-associated-token-account", "spl-token", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9509,19 +9584,31 @@ version = "2.2.0" dependencies = [ "log", "serial_test", + "solana-account", "solana-client", + "solana-client-traits", + "solana-clock", + "solana-commitment-config", "solana-connection-cache", + "solana-epoch-info", + "solana-hash", + "solana-keypair", + "solana-message", + "solana-pubkey", "solana-quic-client", "solana-rpc-client", "solana-rpc-client-api", "solana-runtime", - "solana-sdk", + "solana-signature", + "solana-signer", "solana-streamer", "solana-test-validator", "solana-tpu-client", + "solana-transaction", + "solana-transaction-error", "solana-transaction-status", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9552,7 +9639,7 @@ dependencies = [ "solana-signer", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -9566,7 +9653,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.19", + "rustls 0.23.20", "solana-cli-config", "solana-clock", "solana-commitment-config", @@ -9582,7 +9669,7 @@ dependencies = [ "solana-time-utils", "solana-tls-utils", "solana-tpu-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.13", ] @@ -9727,7 +9814,7 @@ dependencies = [ "spl-token-confidential-transfer-proof-extraction", "spl-token-group-interface", "spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9748,7 +9835,7 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9768,7 +9855,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.19", + "rustls 0.23.20", "solana-entry", "solana-feature-set", "solana-geyser-plugin-manager", @@ -9790,7 +9877,7 @@ dependencies = [ "solana-tls-utils", "static_assertions", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -9815,7 +9902,7 @@ dependencies = [ "solana-packet", "solana-streamer", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -9860,11 +9947,15 @@ dependencies = [ "solana-metrics", ] +[[package]] +name = "solana-validator-exit" +version = "2.2.0" + [[package]] name = "solana-version" version = "2.2.0" dependencies = [ - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_derive", "solana-feature-set", @@ -9874,6 +9965,48 @@ dependencies = [ "solana-serde-varint", ] +[[package]] +name = "solana-vortexor" +version = "2.2.0" +dependencies = [ + "assert_matches", + 
"async-channel", + "bytes", + "clap 2.33.3", + "crossbeam-channel", + "dashmap", + "futures 0.3.31", + "futures-util", + "governor", + "histogram", + "indexmap 2.7.0", + "itertools 0.12.1", + "libc", + "log", + "nix", + "pem", + "percentage", + "quinn", + "quinn-proto", + "rand 0.8.5", + "rustls 0.23.20", + "smallvec", + "socket2 0.5.8", + "solana-clap-utils", + "solana-logger", + "solana-measure", + "solana-metrics", + "solana-net-utils", + "solana-perf", + "solana-sdk", + "solana-streamer", + "solana-transaction-metrics-tracker", + "solana-version", + "thiserror 2.0.9", + "tokio", + "x509-parser", +] + [[package]] name = "solana-vote" version = "2.2.0" @@ -9888,7 +10021,7 @@ dependencies = [ "solana-frozen-abi-macro", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9911,7 +10044,7 @@ dependencies = [ "solana-program-runtime", "solana-sdk", "test-case", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -9950,9 +10083,10 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-sdk", ] @@ -9971,7 +10105,7 @@ dependencies = [ "solana-version", "solana-zk-token-sdk", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", ] @@ -10006,7 +10140,7 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "wasm-bindgen", "zeroize", @@ -10022,9 +10156,10 @@ dependencies = [ "num-derive", "num-traits", "solana-feature-set", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-token-sdk", ] @@ -10034,9 +10169,17 @@ version = "2.2.0" dependencies = [ "bytemuck", "curve25519-dalek 4.1.3", + "solana-account", "solana-compute-budget", + "solana-compute-budget-interface", + "solana-instruction", + "solana-keypair", "solana-program-test", - "solana-sdk", + "solana-pubkey", + "solana-signer", + "solana-system-interface", + "solana-transaction", + "solana-transaction-error", "solana-zk-token-sdk", ] @@ -10072,31 +10215,11 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "zeroize", ] -[[package]] -name = "solana_rbpf" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" -dependencies = [ - "byteorder", - "combine 3.8.1", - "gdbstub", - "hash32", - "libc", - "log", - "rand 0.8.5", - "rustc-demangle", - "scroll", - "shuttle", - "thiserror 1.0.69", - "winapi 0.3.9", -] - [[package]] name = "spin" version = "0.5.2" @@ -10164,7 +10287,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -10176,7 +10299,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.90", + "syn 2.0.91", "thiserror 1.0.69", ] @@ -10264,7 +10387,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -10534,9 +10657,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035" dependencies = [ 
"proc-macro2", "quote", @@ -10569,7 +10692,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -10618,9 +10741,9 @@ dependencies = [ [[package]] name = "systemstat" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24aec24a9312c83999a28e3ef9db7e2afd5c64bf47725b758cdc1cafd5b0bd2" +checksum = "668a4db78b439df482c238f559e4ea869017f9e62ef0a059c8bfcd841a4df544" dependencies = [ "bytesize", "lazy_static", @@ -10738,7 +10861,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -10750,7 +10873,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "test-case-core", ] @@ -10780,11 +10903,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -10795,18 +10918,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -10952,7 +11075,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -11196,7 +11319,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -11523,7 +11646,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-shared", ] @@ -11557,7 +11680,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11950,7 +12073,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure 0.13.1", ] @@ -11971,7 +12094,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -11991,7 +12114,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure 0.13.1", ] @@ -12012,7 +12135,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -12034,7 +12157,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] diff --git 
a/Cargo.toml b/Cargo.toml index 7657fe3dc21e45..e435b29aa750a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,11 @@ inherits = "release" debug = true split-debuginfo = "packed" +lto = false # Preserve the 'thin local LTO' for this build. + +[profile.release] +split-debuginfo = "unpacked" +lto = "thin" [workspace] members = [ @@ -19,6 +24,7 @@ members = [ "banks-server", "bench-streamer", "bench-tps", + "bench-vote", "bloom", "bucket_map", "builtins", @@ -174,6 +180,7 @@ members = [ "sdk/serialize-utils", "sdk/sha256-hasher", "sdk/short-vec", + "sdk/shred-version", "sdk/signature", "sdk/signer", "sdk/slot-hashes", @@ -186,6 +193,7 @@ members = [ "sdk/transaction", "sdk/transaction-context", "sdk/transaction-error", + "sdk/validator-exit", "send-transaction-service", "stake-accounts", "storage-bigtable", @@ -217,6 +225,7 @@ members = [ "upload-perf", "validator", "version", + "vortexor", "vote", "watchtower", "wen-restart", @@ -250,7 +259,7 @@ agave-transaction-view = { path = "transaction-view", version = "=2.2.0" } aquamarine = "0.3.3" aes-gcm-siv = "0.11.1" ahash = "0.8.11" -anyhow = "1.0.94" +anyhow = "1.0.95" arbitrary = "1.4.1" ark-bn254 = "0.4.0" ark-ec = "0.4.0" @@ -276,8 +285,8 @@ bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.20.0" -bytemuck_derive = "1.8.0" +bytemuck = "1.21.0" +bytemuck_derive = "1.8.1" byteorder = "1.5.0" bytes = "1.9" bzip2 = "0.4.4" @@ -287,14 +296,14 @@ cfg_eval = "0.1.2" chrono = { version = "0.4.39", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" -console = "0.15.8" +console = "0.15.10" console_error_panic_hook = "0.1.7" console_log = "0.2.2" const_format = "0.2.34" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" -crossbeam-channel = "0.5.13" +crossbeam-channel = "0.5.14" csv = "1.3.1" ctrlc = "3.4.5" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } @@ -331,7 +340,7 @@ histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" humantime = "2.0.1" -hyper = "0.14.31" +hyper = "0.14.32" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.15" @@ -351,7 +360,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" lazy_static = "1.5.0" -libc = "0.2.168" +libc = "0.2.169" libloading = "0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", @@ -383,7 +392,7 @@ predicates = "2.1" pretty-hex = "0.3.0" prio-graph = "0.3.0" proc-macro2 = "1.0.92" -proptest = "1.5" +proptest = "1.6" prost = "0.11.9" prost-build = "0.11.9" prost-types = "0.11.9" @@ -404,11 +413,11 @@ reqwest = { version = "0.11.27", default-features = false } reqwest-middleware = "0.2.5" rolling-file = "0.2.0" rpassword = "7.3" -rustls = { version = "0.23.19", default-features = false } +rustls = { version = "0.23.20", features = ["std"], default-features = false } scopeguard = "1.2.0" -semver = "1.0.23" +semver = "1.0.24" seqlock = "0.2.0" -serde = "1.0.215" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.216" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde-big-array = "0.5.1" serde_bytes = "0.11.15" serde_derive = "1.0.215" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 @@ -569,6 +578,7 @@ solana-rpc-client-api = { path = "rpc-client-api", version = "=2.2.0" } solana-rpc-client-nonce-utils = 
{ path = "rpc-client-nonce-utils", version = "=2.2.0" } solana-runtime = { path = "runtime", version = "=2.2.0" } solana-runtime-transaction = { path = "runtime-transaction", version = "=2.2.0" } +solana-sbpf = "=0.9.0" solana-sdk = { path = "sdk", version = "=2.2.0" } solana-sdk-ids = { path = "sdk/sdk-ids", version = "=2.2.0" } solana-sdk-macro = { path = "sdk/macro", version = "=2.2.0" } @@ -576,6 +586,7 @@ solana-secp256k1-program = { path = "sdk/secp256k1-program", version = "=2.2.0" solana-secp256k1-recover = { path = "curves/secp256k1-recover", version = "=2.2.0", default-features = false } solana-send-transaction-service = { path = "send-transaction-service", version = "=2.2.0" } solana-short-vec = { path = "sdk/short-vec", version = "=2.2.0" } +solana-shred-version = { path = "sdk/shred-version", version = "=2.2.0" } solana-stable-layout = { path = "sdk/stable-layout", version = "=2.2.0" } solana-stake-program = { path = "programs/stake", version = "=2.2.0" } solana-storage-bigtable = { path = "storage-bigtable", version = "=2.2.0" } @@ -603,6 +614,7 @@ solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", ver solana-turbine = { path = "turbine", version = "=2.2.0" } solana-type-overrides = { path = "type-overrides", version = "=2.2.0" } solana-udp-client = { path = "udp-client", version = "=2.2.0" } +solana-validator-exit = { path = "sdk/validator-exit", version = "=2.2.0" } solana-version = { path = "version", version = "=2.2.0" } solana-vote = { path = "vote", version = "=2.2.0" } solana-vote-program = { path = "programs/vote", version = "=2.2.0" } @@ -612,7 +624,6 @@ solana-zk-keygen = { path = "zk-keygen", version = "=2.2.0" } solana-zk-sdk = { path = "zk-sdk", version = "=2.2.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.2.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.2.0" } -solana_rbpf = "=0.8.5" spl-associated-token-account = "=6.0.0" spl-instruction-padding = "0.3" spl-memo = "=6.0.0" @@ -631,12 +642,12 @@ symlink = "0.1.0" syn = "2.0" sys-info = "0.9.1" sysctl = "0.4.6" -systemstat = "0.2.3" +systemstat = "0.2.4" tar = "0.4.43" tarpc = "0.29.0" tempfile = "3.14.0" test-case = "3.3.1" -thiserror = "2.0.6" +thiserror = "2.0.9" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 82a983ede37a20..fafbeff18d0bce 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -103,6 +103,10 @@ harness = false name = "bench_hashing" harness = false +[[bench]] +name = "read_only_accounts_cache" +harness = false + [[bench]] name = "bench_serde" harness = false diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 6fe87523cf18f1..ad077f4f2a82cc 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -1,8 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] use { criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput}, - rand::{distributions::WeightedIndex, prelude::*}, - rand_chacha::ChaChaRng, solana_accounts_db::{ accounts_file::StorageAccess, append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA}, @@ -15,13 +13,14 @@ use { account::{AccountSharedData, ReadableAccount}, clock::Slot, pubkey::Pubkey, - rent::Rent, rent_collector::RENT_EXEMPT_RENT_EPOCH, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, - std::{iter, mem::ManuallyDrop}, + 
std::mem::ManuallyDrop, }; +mod utils; + const ACCOUNTS_COUNTS: [usize; 4] = [ 1, // the smallest count; will bench overhead 100, // number of accounts written per slot on mnb (with *no* rent rewrites) @@ -116,40 +115,20 @@ fn bench_scan_pubkeys(c: &mut Criterion) { MAX_PERMITTED_DATA_LENGTH as usize, ]; let weights = [3, 75, 20, 1, 1]; - let distribution = WeightedIndex::new(weights).unwrap(); - - let rent = Rent::default(); - let rent_minimum_balances: Vec<_> = data_sizes - .iter() - .map(|data_size| rent.minimum_balance(*data_size)) - .collect(); for accounts_count in ACCOUNTS_COUNTS { group.throughput(Throughput::Elements(accounts_count as u64)); - let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64); - let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique) + let storable_accounts: Vec<_> = utils::accounts(255, &data_sizes, &weights) .take(accounts_count) .collect(); - let accounts: Vec<_> = iter::repeat_with(|| { - let index = distribution.sample(&mut rng); - AccountSharedData::new_rent_epoch( - rent_minimum_balances[index], - data_sizes[index], - &Pubkey::default(), - RENT_EXEMPT_RENT_EPOCH, - ) - }) - .take(pubkeys.len()) - .collect(); - let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect(); // create an append vec file let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}")); _ = std::fs::remove_file(&append_vec_path); - let file_size = accounts + let file_size = storable_accounts .iter() - .map(|account| append_vec::aligned_stored_size(account.data().len())) + .map(|(_, account)| append_vec::aligned_stored_size(account.data().len())) .sum(); let append_vec = AppendVec::new(append_vec_path, true, file_size); let stored_accounts_info = append_vec diff --git a/accounts-db/benches/read_only_accounts_cache.rs b/accounts-db/benches/read_only_accounts_cache.rs new file mode 100644 index 00000000000000..4f1fa4febd3820 --- /dev/null +++ b/accounts-db/benches/read_only_accounts_cache.rs @@ -0,0 +1,327 @@ +use { + criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}, + rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng}, + solana_accounts_db::{ + accounts_db::AccountsDb, read_only_accounts_cache::ReadOnlyAccountsCache, + }, + solana_sdk::system_instruction::MAX_PERMITTED_DATA_LENGTH, + std::{ + hint::black_box, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::Builder, + time::{Duration, Instant}, + }, +}; +mod utils; + +/// Sizes of accounts. +/// +/// - No data. +/// - 165 bytes (a token account). +/// - 200 bytes (a stake account). +/// - 10 mebibytes (the max size for an account). +const DATA_SIZES: &[usize] = &[0, 165, 200, MAX_PERMITTED_DATA_LENGTH as usize]; +/// Distribution of the account sizes: +/// +/// - 3% of accounts have no data. +/// - 75% of accounts are 165 bytes (a token account). +/// - 20% of accounts are 200 bytes (a stake account). +/// - 2% of accounts are 10 mebibytes (the max size for an account). +const WEIGHTS: &[usize] = &[3, 75, 20, 2]; +/// Numbers of reader and writer threads to bench. +const NUM_READERS_WRITERS: &[usize] = &[ + 8, 16, + // These parameters are likely to freeze your computer if it has fewer than + // 32 cores. + 32, 64, +]; + +/// Benchmarks read-only cache loads and stores without causing eviction. +fn bench_read_only_accounts_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("read_only_accounts_cache"); + let slot = 0; + + // Prepare initial accounts, but make sure not to fill up the cache.
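+ // Half of the low watermark leaves headroom, so these initial stores + // should not trigger any eviction.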
+ let accounts: Vec<_> = utils::accounts_with_size_limit( + 255, + DATA_SIZES, + WEIGHTS, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO / 2, + ) + .collect(); + let pubkeys: Vec<_> = accounts + .iter() + .map(|(pubkey, _)| pubkey.to_owned()) + .collect(); + + for num_readers_writers in NUM_READERS_WRITERS { + let cache = Arc::new(ReadOnlyAccountsCache::new( + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, + )); + + for (pubkey, account) in accounts.iter() { + cache.store(*pubkey, slot, account.clone()); + } + + // Spawn the reader threads in the background. They are reading the + // initially inserted accounts. + let stop_threads = Arc::new(AtomicBool::new(false)); + let reader_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let pubkeys = pubkeys.clone(); + + Builder::new() + .name(format!("reader{i:02}")) + .spawn({ + move || { + // Continuously read random accounts. + let mut rng = SmallRng::seed_from_u64(i as u64); + while !stop_threads.load(Ordering::Relaxed) { + let pubkey = pubkeys.choose(&mut rng).unwrap(); + black_box(cache.load(*pubkey, slot)); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Spawn the writer threads in the background. + let slot = 1; + let writer_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let accounts = accounts.clone(); + + Builder::new() + .name(format!("writer{i:02}")) + .spawn({ + move || { + // Continuously write to already existing pubkeys. + let mut rng = SmallRng::seed_from_u64(100_u64.saturating_add(i as u64)); + while !stop_threads.load(Ordering::Relaxed) { + let (pubkey, account) = accounts.choose(&mut rng).unwrap(); + cache.store(*pubkey, slot, account.clone()); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + group.bench_function(BenchmarkId::new("store", num_readers_writers), |b| { + b.iter_custom(|iters| { + let mut total_time = Duration::new(0, 0); + + for (pubkey, account) in accounts.iter().cycle().take(iters as usize) { + // Measure only stores. + let start = Instant::now(); + cache.store(*pubkey, slot, account.clone()); + total_time = total_time.saturating_add(start.elapsed()); + } + total_time + }) + }); + group.bench_function(BenchmarkId::new("load", num_readers_writers), |b| { + b.iter_custom(|iters| { + let start = Instant::now(); + for (pubkey, _) in accounts.iter().cycle().take(iters as usize) { + black_box(cache.load(*pubkey, slot)); + } + + start.elapsed() + }) + }); + + stop_threads.store(true, Ordering::Relaxed); + for reader_handle in reader_handles { + reader_handle.join().unwrap(); + } + for writer_handle in writer_handles { + writer_handle.join().unwrap(); + } + } +} + +/// Benchmarks the read-only cache eviction mechanism. It does so by performing +/// multithreaded reads and writes on a full cache. Each write triggers +/// eviction. Background reads add more contention. +fn bench_read_only_accounts_cache_eviction( + c: &mut Criterion, + group_name: &str, + max_data_size_lo: usize, + max_data_size_hi: usize, +) { + // Prepare initial accounts, two times the high limit of the cache, to make + // sure that the background threads sometimes try to store something which + // is not in the cache.
+ let accounts: Vec<_> = utils::accounts_with_size_limit( + 255, + DATA_SIZES, + WEIGHTS, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI * 2, + ) + .collect(); + let pubkeys: Vec<_> = accounts + .iter() + .map(|(pubkey, _)| pubkey.to_owned()) + .collect(); + + let mut group = c.benchmark_group(group_name); + + for num_readers_writers in NUM_READERS_WRITERS { + let cache = Arc::new(ReadOnlyAccountsCache::new( + max_data_size_lo, + max_data_size_hi, + AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, + )); + + // Fill up the cache. + let slot = 0; + for (pubkey, account) in accounts.iter() { + cache.store(*pubkey, slot, account.clone()); + } + + // Spawn the reader threads in the background. They are reading the + // initially inserted accounts. + let stop_threads = Arc::new(AtomicBool::new(false)); + let reader_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let pubkeys = pubkeys.clone(); + + Builder::new() + .name(format!("reader{i:02}")) + .spawn({ + move || { + // Continuously read random accounts. + let mut rng = SmallRng::seed_from_u64(i as u64); + while !stop_threads.load(Ordering::Relaxed) { + let pubkey = pubkeys.choose(&mut rng).unwrap(); + black_box(cache.load(*pubkey, slot)); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Spawn the writer threads in the background. Prepare the accounts + // with the same public keys and sizes as the initial ones. The + // intention is a constant overwrite in the background for additional + // contention. + let slot = 1; + let writer_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let accounts = accounts.clone(); + + Builder::new() + .name(format!("writer{i:02}")) + .spawn({ + move || { + // Continuously write to already existing pubkeys. + let mut rng = SmallRng::seed_from_u64(100_u64.saturating_add(i as u64)); + while !stop_threads.load(Ordering::Relaxed) { + let (pubkey, account) = accounts.choose(&mut rng).unwrap(); + cache.store(*pubkey, slot, account.clone()); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Benchmark the performance of loading and storing accounts in a + // cache that is fully populated. This triggers eviction for each + // write operation. Background threads introduce contention. + group.bench_function(BenchmarkId::new("load", num_readers_writers), |b| { + b.iter_custom(|iters| { + let mut rng = SmallRng::seed_from_u64(1); + let mut total_time = Duration::new(0, 0); + + for _ in 0..iters { + let pubkey = pubkeys.choose(&mut rng).unwrap().to_owned(); + + let start = Instant::now(); + black_box(cache.load(pubkey, slot)); + total_time = total_time.saturating_add(start.elapsed()); + } + + total_time + }) + }); + group.bench_function(BenchmarkId::new("store", num_readers_writers), |b| { + b.iter_custom(|iters| { + let accounts = utils::accounts(0, DATA_SIZES, WEIGHTS).take(iters as usize); + + let start = Instant::now(); + for (pubkey, account) in accounts { + cache.store(pubkey, slot, account); + } + + start.elapsed() + }) + }); + + stop_threads.store(true, Ordering::Relaxed); + for reader_handle in reader_handles { + reader_handle.join().unwrap(); + } + for writer_handle in writer_handles { + writer_handle.join().unwrap(); + } + } +} + +/// Benchmarks read-only cache eviction with low and high thresholds.
After +/// each eviction, enough stores need to be made to reach the difference +/// between the low and high threshold, triggering another eviction. +/// +/// Even though eviction does not occur on each store, the number of iterations +/// is high enough to trigger eviction often. Contention which comes from +/// locking the cache is still visible both in the benchmark's timings and in +/// profiles gathered from the benchmark run. +/// +/// This benchmark aims to simulate contention in a manner close to what occurs +/// on validators. +fn bench_read_only_accounts_cache_eviction_lo_hi(c: &mut Criterion) { + bench_read_only_accounts_cache_eviction( + c, + "read_only_accounts_cache_eviction_lo_hi", + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + ) +} + +/// Benchmarks read-only cache eviction without differentiating between low and +/// high thresholds. Each store triggers another eviction immediately. +/// +/// This benchmark measures the absolute worst-case scenario, which may not +/// reflect actual conditions in validators. +fn bench_read_only_accounts_cache_eviction_hi(c: &mut Criterion) { + bench_read_only_accounts_cache_eviction( + c, + "read_only_accounts_cache_eviction_hi", + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + ) +} + +criterion_group!( + benches, + bench_read_only_accounts_cache, + bench_read_only_accounts_cache_eviction_lo_hi, + bench_read_only_accounts_cache_eviction_hi +); +criterion_main!(benches); diff --git a/accounts-db/benches/utils.rs b/accounts-db/benches/utils.rs new file mode 100644 index 00000000000000..1b2c57a145b1b7 --- /dev/null +++ b/accounts-db/benches/utils.rs @@ -0,0 +1,84 @@ +// This file is included as a module separately in each bench, which causes +// a `dead_code` warning if the given bench doesn't `use` all functions. +#![allow(dead_code)] + +use { + rand::{ + distributions::{Distribution, WeightedIndex}, + Rng, SeedableRng, + }, + rand_chacha::ChaChaRng, + solana_sdk::{ + account::AccountSharedData, pubkey::Pubkey, rent::Rent, + rent_collector::RENT_EXEMPT_RENT_EPOCH, + }, + std::iter, +}; + +/// Returns an infinite iterator of storable accounts. +pub fn accounts<'a>( + seed: u64, + data_sizes: &'a [usize], + weights: &'a [usize], +) -> impl Iterator<Item = (Pubkey, AccountSharedData)> + 'a { + let distribution = WeightedIndex::new(weights).unwrap(); + let mut rng = ChaChaRng::seed_from_u64(seed); + let rent = Rent::default(); + + iter::repeat_with(move || { + let index = distribution.sample(&mut rng); + let data_size = data_sizes[index]; + let owner: [u8; 32] = rng.gen(); + let owner = Pubkey::new_from_array(owner); + ( + owner, + AccountSharedData::new_rent_epoch( + rent.minimum_balance(data_size), + data_size, + &owner, + RENT_EXEMPT_RENT_EPOCH, + ), + ) + }) +} + +/// Returns an iterator over storable accounts, stopping once the cumulative +/// size of the yielded accounts reaches the given `size_limit`.
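+/// The account that reaches the limit is still yielded as the final item, so +/// the total size may slightly exceed `size_limit`.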
+pub fn accounts_with_size_limit<'a>( + seed: u64, + data_sizes: &'a [usize], + weights: &'a [usize], + size_limit: usize, +) -> impl Iterator<Item = (Pubkey, AccountSharedData)> + 'a { + let distribution = WeightedIndex::new(weights).unwrap(); + let mut rng = ChaChaRng::seed_from_u64(seed); + let rent = Rent::default(); + let mut sum = 0_usize; + let mut stop_iter = false; + + iter::from_fn(move || { + let index = distribution.sample(&mut rng); + let data_size = data_sizes[index]; + sum = sum.saturating_add(data_size); + if stop_iter { + None + } else { + // If the limit is reached, include the current account as the last + // one, then stop iterating. + if sum >= size_limit { + stop_iter = true; + } + let owner = Pubkey::new_unique(); + + Some(( + owner, + AccountSharedData::new_rent_epoch( + rent.minimum_balance(data_size), + data_size, + &owner, + RENT_EXEMPT_RENT_EPOCH, + ), + )) + } + }) +} diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 26923d5e05a224..878642ed396eb8 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -788,53 +788,49 @@ impl GenerateIndexTimings { ("total_us", self.index_time, i64), ("scan_stores_us", self.scan_time, i64), ("insertion_time_us", self.insertion_time_us, i64), - ("min_bin_size_in_mem", self.min_bin_size_in_mem as i64, i64), - ("max_bin_size_in_mem", self.max_bin_size_in_mem as i64, i64), + ("min_bin_size_in_mem", self.min_bin_size_in_mem, i64), + ("max_bin_size_in_mem", self.max_bin_size_in_mem, i64), ( "storage_size_storages_us", - self.storage_size_storages_us as i64, + self.storage_size_storages_us, i64 ), - ("index_flush_us", self.index_flush_us as i64, i64), + ("index_flush_us", self.index_flush_us, i64), ( "total_rent_paying", - self.rent_paying.load(Ordering::Relaxed) as i64, + self.rent_paying.load(Ordering::Relaxed), i64 ), ( "amount_to_top_off_rent", - self.amount_to_top_off_rent.load(Ordering::Relaxed) as i64, + self.amount_to_top_off_rent.load(Ordering::Relaxed), i64 ), ( "total_items_including_duplicates", - self.total_including_duplicates as i64, + self.total_including_duplicates, i64 ), - ("total_items_in_mem", self.total_items_in_mem as i64, i64), + ("total_items_in_mem", self.total_items_in_mem, i64), ( "accounts_data_len_dedup_time_us", - self.accounts_data_len_dedup_time_us as i64, + self.accounts_data_len_dedup_time_us, i64 ), ( "total_duplicate_slot_keys", - self.total_duplicate_slot_keys as i64, + self.total_duplicate_slot_keys, i64 ), ( "total_num_unique_duplicate_keys", - self.total_num_unique_duplicate_keys as i64, - i64 - ), - ( - "num_duplicate_accounts", - self.num_duplicate_accounts as i64, + self.total_num_unique_duplicate_keys, i64 ), + ("num_duplicate_accounts", self.num_duplicate_accounts, i64), ( "populate_duplicate_keys_us", - self.populate_duplicate_keys_us as i64, + self.populate_duplicate_keys_us, i64 ), ("total_slots", self.total_slots, i64), @@ -851,14 +847,10 @@ impl GenerateIndexTimings { ), ( "num_zero_lamport_single_refs", - self.num_zero_lamport_single_refs as i64, - i64 - ), - ( - "visit_zero_lamports_us", - self.visit_zero_lamports_us as i64, + self.num_zero_lamport_single_refs, i64 ), + ("visit_zero_lamports_us", self.visit_zero_lamports_us, i64), ( "all_accounts_are_zero_lamports_slots", self.all_accounts_are_zero_lamports_slots, @@ -1889,11 +1881,14 @@ impl AccountsDb { pub const DEFAULT_ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache"; // read only cache does not update lru on read of an entry unless it has been at least this many ms since the last lru update
+ #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100; // The default high and low watermark sizes for the accounts read cache. // If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024; + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024; pub fn default_for_tests() -> Self { @@ -2862,6 +2857,19 @@ impl AccountsDb { } else { found_not_zero += 1; } + + // If this candidate has multiple rooted slot list entries, + // we should reclaim the older ones. + if slot_list.len() > 1 + && *slot + <= max_clean_root_inclusive.unwrap_or(Slot::MAX) + { + should_collect_reclaims = true; + purges_old_accounts_local += 1; + useless = false; + } + // Note, this next if-block is only kept to maintain the + // `uncleaned_roots_slot_list_1` stat. if uncleaned_roots.contains(slot) { // Assertion enforced by `accounts_index.get()`, the latest slot // will not be greater than the given `max_clean_root` @@ -2870,12 +2878,7 @@ impl AccountsDb { { assert!(slot <= &max_clean_root_inclusive); } - if slot_list.len() > 1 { - // no need to reclaim old accounts if there is only 1 slot in the slot list - should_collect_reclaims = true; - purges_old_accounts_local += 1; - useless = false; - } else { + if slot_list.len() == 1 { self.clean_accounts_stats .uncleaned_roots_slot_list_1 .fetch_add(1, Ordering::Relaxed); @@ -3101,12 +3104,12 @@ impl AccountsDb { key_timings.dirty_store_processing_us, i64 ), - ("accounts_scan", accounts_scan.as_us() as i64, i64), - ("clean_old_rooted", clean_old_rooted.as_us() as i64, i64), - ("store_counts", store_counts_time.as_us() as i64, i64), - ("purge_filter", purge_filter.as_us() as i64, i64), - ("calc_deps", calc_deps_time.as_us() as i64, i64), - ("reclaims", reclaims_time.as_us() as i64, i64), + ("accounts_scan", accounts_scan.as_us(), i64), + ("clean_old_rooted", clean_old_rooted.as_us(), i64), + ("store_counts", store_counts_time.as_us(), i64), + ("purge_filter", purge_filter.as_us(), i64), + ("calc_deps", calc_deps_time.as_us(), i64), + ("reclaims", reclaims_time.as_us(), i64), ("delta_insert_us", key_timings.delta_insert_us, i64), ("delta_key_count", key_timings.delta_key_count, i64), ("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64), @@ -6232,11 +6235,7 @@ impl AccountsDb { unflushable_unrooted_slot_count, i64 ), - ( - "flush_roots_elapsed", - flush_roots_elapsed.as_us() as i64, - i64 - ), + ("flush_roots_elapsed", flush_roots_elapsed.as_us(), i64), ("account_bytes_saved", account_bytes_saved, i64), ("num_accounts_saved", num_accounts_saved, i64), ( @@ -7528,15 +7527,6 @@ impl AccountsDb { } } - /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) - /// - /// As part of calculating the accounts delta hash, get a list of accounts modified this slot - /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. 
- #[cfg(feature = "dev-context-only-utils")] - pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { - self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) - } - /// Calculate accounts delta hash for `slot` /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot @@ -9291,6 +9281,21 @@ impl AccountStorageEntry { // These functions/fields are only usable from a dev context (i.e. tests and benches) #[cfg(feature = "dev-context-only-utils")] impl AccountsDb { + /// useful to adapt tests written prior to introduction of the write cache + /// to use the write cache + pub fn add_root_and_flush_write_cache(&self, slot: Slot) { + self.add_root(slot); + self.flush_root_write_cache(slot); + } + + /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) + /// + /// As part of calculating the accounts delta hash, get a list of accounts modified this slot + /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. + pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { + self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) + } + pub fn load_without_fixed_root( &self, ancestors: &Ancestors, @@ -9428,13 +9433,6 @@ impl AccountsDb { ) } - /// useful to adapt tests written prior to introduction of the write cache - /// to use the write cache - pub fn add_root_and_flush_write_cache(&self, slot: Slot) { - self.add_root(slot); - self.flush_root_write_cache(slot); - } - /// useful to adapt tests written prior to introduction of the write cache /// to use the write cache pub fn flush_root_write_cache(&self, root: Slot) { diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 8cddf857f1681e..5841b3e7599785 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -39,20 +39,21 @@ impl AccountsDb { /// in the reverse order of the slots so that an account is only streamed once. At a slot, if the accounts is updated /// multiple times only the last write (with highest write_version) is notified. 
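A minimal sketch of this newest-first dedup pattern, with hypothetical stand-ins (`accounts_in_slot` for the real storage scan, `notify` for the plugin callback, and `[u8; 32]` keys in place of Pubkey); the function below additionally gates the whole scan on `snapshot_notifications_enabled`:

use std::collections::HashSet;

// Walk slots newest-first; the first version seen for a pubkey is its latest.
fn notify_newest_only(
    mut slots: Vec<u64>,
    accounts_in_slot: impl Fn(u64) -> Vec<[u8; 32]>,
    mut notify: impl FnMut(u64, [u8; 32]),
) {
    let mut notified: HashSet<[u8; 32]> = HashSet::new();
    slots.sort_by(|a, b| b.cmp(a));
    for slot in slots {
        for pubkey in accounts_in_slot(slot) {
            // `insert` returns false if this pubkey was already streamed
            // from a newer slot, so older versions are skipped.
            if notified.insert(pubkey) {
                notify(slot, pubkey);
            }
        }
    }
}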
pub fn notify_account_restore_from_snapshot(&self) { - if self.accounts_update_notifier.is_none() { + let Some(accounts_update_notifier) = &self.accounts_update_notifier else { return; - } + }; - let mut slots = self.storage.all_slots(); - let mut notified_accounts: HashSet<Pubkey> = HashSet::default(); let mut notify_stats = GeyserPluginNotifyAtSnapshotRestoreStats::default(); + if accounts_update_notifier.snapshot_notifications_enabled() { + let mut slots = self.storage.all_slots(); + let mut notified_accounts: HashSet<Pubkey> = HashSet::default(); - slots.sort_by(|a, b| b.cmp(a)); - for slot in slots { - self.notify_accounts_in_slot(slot, &mut notified_accounts, &mut notify_stats); + slots.sort_by(|a, b| b.cmp(a)); + for slot in slots { + self.notify_accounts_in_slot(slot, &mut notified_accounts, &mut notify_stats); + } } - let accounts_update_notifier = self.accounts_update_notifier.as_ref().unwrap(); accounts_update_notifier.notify_end_of_restore_from_snapshot(); notify_stats.report(); } @@ -196,6 +197,10 @@ pub mod tests { } impl AccountsUpdateNotifierInterface for GeyserTestPlugin { + fn snapshot_notifications_enabled(&self) -> bool { + true + } + /// Notified when an account is updated at runtime, due to transaction activities fn notify_account_update( &self, diff --git a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs index d33ae9c1f63658..22352cce5df24d 100644 --- a/accounts-db/src/accounts_db/stats.rs +++ b/accounts-db/src/accounts_db/stats.rs @@ -68,64 +68,64 @@ impl PurgeStats { metric_name, ( "safety_checks_elapsed", - self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64, + self.safety_checks_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "remove_cache_elapsed", - self.remove_cache_elapsed.swap(0, Ordering::Relaxed) as i64, + self.remove_cache_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "remove_storage_entries_elapsed", self.remove_storage_entries_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "drop_storage_entries_elapsed", - self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, + self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "num_cached_slots_removed", - self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64, + self.num_cached_slots_removed.swap(0, Ordering::Relaxed), i64 ), ( "num_stored_slots_removed", - self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64, + self.num_stored_slots_removed.swap(0, Ordering::Relaxed), i64 ), ( "total_removed_storage_entries", self.total_removed_storage_entries - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "total_removed_cached_bytes", - self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64, + self.total_removed_cached_bytes.swap(0, Ordering::Relaxed), i64 ), ( "total_removed_stored_bytes", - self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64, + self.total_removed_stored_bytes.swap(0, Ordering::Relaxed), i64 ), ( "scan_storages_elapsed", - self.scan_storages_elapsed.swap(0, Ordering::Relaxed) as i64, + self.scan_storages_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "purge_accounts_index_elapsed", - self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed) as i64, + self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "handle_reclaims_elapsed", - self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, + self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed), i64 ), ); @@ -211,44 +211,40 @@ impl LatestAccountsIndexRootsStats { pub fn
report(&self) { datapoint_info!( "accounts_index_roots_len", - ( - "roots_len", - self.roots_len.load(Ordering::Relaxed) as i64, - i64 - ), + ("roots_len", self.roots_len.load(Ordering::Relaxed), i64), ( "uncleaned_roots_len", - self.uncleaned_roots_len.load(Ordering::Relaxed) as i64, + self.uncleaned_roots_len.load(Ordering::Relaxed), i64 ), ( "roots_range_width", - self.roots_range.load(Ordering::Relaxed) as i64, + self.roots_range.load(Ordering::Relaxed), i64 ), ( "unrooted_cleaned_count", - self.unrooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, + self.unrooted_cleaned_count.swap(0, Ordering::Relaxed), i64 ), ( "rooted_cleaned_count", - self.rooted_cleaned_count.swap(0, Ordering::Relaxed) as i64, + self.rooted_cleaned_count.swap(0, Ordering::Relaxed), i64 ), ( "clean_unref_from_storage_us", - self.clean_unref_from_storage_us.swap(0, Ordering::Relaxed) as i64, + self.clean_unref_from_storage_us.swap(0, Ordering::Relaxed), i64 ), ( "clean_dead_slot_us", - self.clean_dead_slot_us.swap(0, Ordering::Relaxed) as i64, + self.clean_dead_slot_us.swap(0, Ordering::Relaxed), i64 ), ( "append_vecs_open", - APPEND_VEC_MMAPPED_FILES_OPEN.load(Ordering::Relaxed) as i64, + APPEND_VEC_MMAPPED_FILES_OPEN.load(Ordering::Relaxed), i64 ), ( @@ -388,7 +384,7 @@ impl ShrinkStats { ), ( "num_slots_shrunk", - self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64, + self.num_slots_shrunk.swap(0, Ordering::Relaxed), i64 ), ( @@ -403,7 +399,7 @@ impl ShrinkStats { ), ( "storage_read_elapsed", - self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64, + self.storage_read_elapsed.swap(0, Ordering::Relaxed), i64 ), ( @@ -413,78 +409,78 @@ impl ShrinkStats { ), ( "index_read_elapsed", - self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64, + self.index_read_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "create_and_insert_store_elapsed", self.create_and_insert_store_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "store_accounts_elapsed", - self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64, + self.store_accounts_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "update_index_elapsed", - self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64, + self.update_index_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "handle_reclaims_elapsed", - self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64, + self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "remove_old_stores_shrink_us", - self.remove_old_stores_shrink_us.swap(0, Ordering::Relaxed) as i64, + self.remove_old_stores_shrink_us.swap(0, Ordering::Relaxed), i64 ), ( "rewrite_elapsed", - self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, + self.rewrite_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "drop_storage_entries_elapsed", - self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64, + self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "accounts_removed", - self.accounts_removed.swap(0, Ordering::Relaxed) as i64, + self.accounts_removed.swap(0, Ordering::Relaxed), i64 ), ( "bytes_removed", - self.bytes_removed.swap(0, Ordering::Relaxed) as i64, + self.bytes_removed.swap(0, Ordering::Relaxed), i64 ), ( "bytes_written", - self.bytes_written.swap(0, Ordering::Relaxed) as i64, + self.bytes_written.swap(0, Ordering::Relaxed), i64 ), ( "skipped_shrink", - self.skipped_shrink.swap(0, Ordering::Relaxed) as i64, + self.skipped_shrink.swap(0, Ordering::Relaxed), i64 ), ( "alive_accounts", - self.alive_accounts.swap(0, Ordering::Relaxed) as i64, + 
self.alive_accounts.swap(0, Ordering::Relaxed), i64 ), ( "dead_accounts", - self.dead_accounts.swap(0, Ordering::Relaxed) as i64, + self.dead_accounts.swap(0, Ordering::Relaxed), i64 ), ( "accounts_loaded", - self.accounts_loaded.swap(0, Ordering::Relaxed) as i64, + self.accounts_loaded.swap(0, Ordering::Relaxed), i64 ), ( @@ -544,7 +540,7 @@ impl ShrinkAncientStats { "num_slots_shrunk", self.shrink_stats .num_slots_shrunk - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( @@ -565,7 +561,7 @@ impl ShrinkAncientStats { "storage_read_elapsed", self.shrink_stats .storage_read_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( @@ -579,112 +575,108 @@ impl ShrinkAncientStats { "index_read_elapsed", self.shrink_stats .index_read_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "create_and_insert_store_elapsed", self.shrink_stats .create_and_insert_store_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "store_accounts_elapsed", self.shrink_stats .store_accounts_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "update_index_elapsed", self.shrink_stats .update_index_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "handle_reclaims_elapsed", self.shrink_stats .handle_reclaims_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "remove_old_stores_shrink_us", self.shrink_stats .remove_old_stores_shrink_us - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "rewrite_elapsed", - self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.rewrite_elapsed.swap(0, Ordering::Relaxed), i64 ), ( "unpackable_slots_count", self.shrink_stats .unpackable_slots_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "newest_alive_packed_count", self.shrink_stats .newest_alive_packed_count - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "drop_storage_entries_elapsed", self.shrink_stats .drop_storage_entries_elapsed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "accounts_removed", self.shrink_stats .accounts_removed - .swap(0, Ordering::Relaxed) as i64, + .swap(0, Ordering::Relaxed), i64 ), ( "bytes_removed", - self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.bytes_removed.swap(0, Ordering::Relaxed), i64 ), ( "bytes_written", - self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.bytes_written.swap(0, Ordering::Relaxed), i64 ), ( "alive_accounts", - self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.alive_accounts.swap(0, Ordering::Relaxed), i64 ), ( "dead_accounts", - self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.dead_accounts.swap(0, Ordering::Relaxed), i64 ), ( "accounts_loaded", - self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed) as i64, + self.shrink_stats.accounts_loaded.swap(0, Ordering::Relaxed), i64 ), ( "ancient_append_vecs_shrunk", - self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "random", - self.random_shrink.swap(0, Ordering::Relaxed) as i64, + self.ancient_append_vecs_shrunk.swap(0, Ordering::Relaxed), i64 ), + ("random", self.random_shrink.swap(0, Ordering::Relaxed), i64), ( "slots_eligible_to_shrink", 
self.slots_eligible_to_shrink.swap(0, Ordering::Relaxed), @@ -702,37 +694,33 @@ impl ShrinkAncientStats { ), ( "slots_considered", - self.slots_considered.swap(0, Ordering::Relaxed) as i64, + self.slots_considered.swap(0, Ordering::Relaxed), i64 ), ( "ancient_scanned", - self.ancient_scanned.swap(0, Ordering::Relaxed) as i64, - i64 - ), - ( - "total_us", - self.total_us.swap(0, Ordering::Relaxed) as i64, + self.ancient_scanned.swap(0, Ordering::Relaxed), i64 ), + ("total_us", self.total_us.swap(0, Ordering::Relaxed), i64), ( "bytes_ancient_created", - self.bytes_ancient_created.swap(0, Ordering::Relaxed) as i64, + self.bytes_ancient_created.swap(0, Ordering::Relaxed), i64 ), ( "bytes_from_must_shrink", - self.bytes_from_must_shrink.swap(0, Ordering::Relaxed) as i64, + self.bytes_from_must_shrink.swap(0, Ordering::Relaxed), i64 ), ( "bytes_from_smallest_storages", - self.bytes_from_smallest_storages.swap(0, Ordering::Relaxed) as i64, + self.bytes_from_smallest_storages.swap(0, Ordering::Relaxed), i64 ), ( "bytes_from_newest_storages", - self.bytes_from_newest_storages.swap(0, Ordering::Relaxed) as i64, + self.bytes_from_newest_storages.swap(0, Ordering::Relaxed), i64 ), ( diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index e1eb4b85144987..2f810b9598c6a8 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -23,7 +23,7 @@ use { }, std::{ hash::DefaultHasher, - iter::FromIterator, + iter::{self, FromIterator}, str::FromStr, sync::{atomic::AtomicBool, RwLock}, thread::{self, Builder, JoinHandle}, @@ -8146,3 +8146,115 @@ fn compute_merkle_root(hashes: impl IntoIterator<Item = Hash>) -> Hash { let hashes = hashes.into_iter().collect(); AccountsHasher::compute_merkle_root_recurse(hashes, MERKLE_FANOUT) } + +/// Test that `clean` reclaims old accounts when cleaning old storages +/// +/// When `clean` constructs candidates from old storages, pubkeys in these storages may have other +/// newer versions of the accounts in other newer storages *not* explicitly marked to be visited by +/// `clean`. In this case, `clean` should still reclaim the old versions of these accounts. +#[test] +fn test_clean_old_storages_with_reclaims_rooted() { + let accounts_db = AccountsDb::new_single_for_tests(); + let pubkey = Pubkey::new_unique(); + let old_slot = 11; + let new_slot = 22; + let slots = [old_slot, new_slot]; + for &slot in &slots { + let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique()); + // store `pubkey` into multiple slots, and also store another unique pubkey + // to prevent the whole storage from being marked as dead by `clean`. + accounts_db.store_for_tests( + slot, + &[(&pubkey, &account), (&Pubkey::new_unique(), &account)], + ); + accounts_db.add_root_and_flush_write_cache(slot); + // ensure this slot is *not* in the dirty_stores or uncleaned_pubkeys, because we want to + // test cleaning *old* storages, i.e.
when they aren't explicitly marked for cleaning + assert!(!accounts_db.dirty_stores.contains_key(&slot)); + assert!(!accounts_db.uncleaned_pubkeys.contains_key(&slot)); + } + + // add `old_slot` to the dirty stores list to mimic it being picked up as old + let old_storage = accounts_db + .storage + .get_slot_storage_entry_shrinking_in_progress_ok(old_slot) + .unwrap(); + accounts_db.dirty_stores.insert(old_slot, old_storage); + + // ensure the slot list for `pubkey` has both the old and new slots + let slot_list = accounts_db + .accounts_index + .get_bin(&pubkey) + .slot_list_mut(&pubkey, |slot_list| slot_list.clone()) + .unwrap(); + assert_eq!(slot_list.len(), slots.len()); + assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter())); + + // `clean` should now reclaim the account in `old_slot`, even though `new_slot` is not + // explicitly being cleaned + accounts_db.clean_accounts_for_tests(); + + // ensure we've reclaimed the account in `old_slot` + let slot_list = accounts_db + .accounts_index + .get_bin(&pubkey) + .slot_list_mut(&pubkey, |slot_list| slot_list.clone()) + .unwrap(); + assert_eq!(slot_list.len(), 1); + assert!(slot_list + .iter() + .map(|(slot, _)| slot) + .eq(iter::once(&new_slot))); +} + +/// Test that `clean` respects rooted vs unrooted slots w.r.t. reclaims +/// +/// When an account is in multiple slots, and the latest is unrooted, `clean` should *not* reclaim +/// all the rooted versions. +#[test] +fn test_clean_old_storages_with_reclaims_unrooted() { + let accounts_db = AccountsDb::new_single_for_tests(); + let pubkey = Pubkey::new_unique(); + let old_slot = 11; + let new_slot = 22; + let slots = [old_slot, new_slot]; + for &slot in &slots { + let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique()); + // store `pubkey` into multiple slots, and also store another unique pubkey + // to prevent the whole storage from being marked as dead by `clean`. 
+ accounts_db.store_for_tests( + slot, + &[(&pubkey, &account), (&Pubkey::new_unique(), &account)], + ); + accounts_db.calculate_accounts_delta_hash(slot); + // ensure this slot is in uncleaned_pubkeys (but not dirty_stores) so it'll be cleaned + assert!(!accounts_db.dirty_stores.contains_key(&slot)); + assert!(accounts_db.uncleaned_pubkeys.contains_key(&slot)); + } + + // only `old_slot` should be rooted, not `new_slot` + accounts_db.add_root_and_flush_write_cache(old_slot); + assert!(accounts_db.accounts_index.is_alive_root(old_slot)); + assert!(!accounts_db.accounts_index.is_alive_root(new_slot)); + + // ensure the slot list for `pubkey` has both the old and new slots + let slot_list = accounts_db + .accounts_index + .get_bin(&pubkey) + .slot_list_mut(&pubkey, |slot_list| slot_list.clone()) + .unwrap(); + assert_eq!(slot_list.len(), slots.len()); + assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter())); + + // `clean` should *not* reclaim the account in `old_slot` because `new_slot` is not a root + accounts_db.clean_accounts_for_tests(); + + // ensure we have NOT reclaimed the account in `old_slot` + let slot_list = accounts_db + .accounts_index + .get_bin(&pubkey) + .slot_list_mut(&pubkey, |slot_list| slot_list.clone()) + .unwrap(); + assert_eq!(slot_list.len(), slots.len()); + assert!(slot_list.iter().map(|(slot, _)| slot).eq(slots.iter())); +} diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index f90168aeeccd59..2aae2d80a21553 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -2021,6 +2021,16 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { w_roots_tracker.uncleaned_roots.extend(roots); } + /// Removes `root` from `uncleaned_roots` and returns whether it was previously present + #[cfg(feature = "dev-context-only-utils")] + pub fn remove_uncleaned_root(&self, root: Slot) -> bool { + self.roots_tracker + .write() + .unwrap() + .uncleaned_roots + .remove(&root) + } + pub fn max_root_inclusive(&self) -> Slot { self.roots_tracker .read() diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 0e0c5db9aa4afc..330611e82f641b 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -746,7 +746,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex bool; + /// Notified when an account is updated at runtime, due to transaction activities fn notify_account_update( &self, diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 8e7b4faf926b75..27c41ccf27dcce 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -32,6 +32,9 @@ mod file_io; pub mod hardened_unpack; pub mod partitioned_rewards; pub mod pubkey_bins; +#[cfg(feature = "dev-context-only-utils")] +pub mod read_only_accounts_cache; +#[cfg(not(feature = "dev-context-only-utils"))] mod read_only_accounts_cache; mod rolling_bit_field; pub mod secondary_index; diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index 2431761bc5f535..a616a863535073 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -1,5 +1,7 @@ //! ReadOnlyAccountsCache used to store accounts, such as executable accounts, //! which can be large, loaded many times, and rarely change.
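This cache's eviction is watermark-based: nothing is evicted until the total data size crosses the high watermark, after which entries are evicted down to the low watermark (the LO/HI defaults declared in accounts_db.rs above), so evictions happen in batches rather than on every store. A minimal sketch of that policy, assuming a plain LRU deque in place of the DashMap, IndexList, and atomics the real cache uses:

use std::collections::VecDeque;

struct WatermarkCache {
    max_hi: usize, // start evicting once total size exceeds this
    max_lo: usize, // ...and stop once total size is back at or below this
    size: usize,
    lru: VecDeque<(u64, usize)>, // (key, entry_size), least recently used at the front
}

impl WatermarkCache {
    fn insert(&mut self, key: u64, entry_size: usize) {
        self.lru.push_back((key, entry_size));
        self.size += entry_size;
        if self.size > self.max_hi {
            // Evict in a batch, from least- to most-recently used.
            while self.size > self.max_lo {
                let Some((_key, evicted)) = self.lru.pop_front() else { break };
                self.size -= evicted;
            }
        }
    }
}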
+#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; use { dashmap::{mapref::entry::Entry, DashMap}, index_list::{Index, IndexList}, @@ -22,6 +24,7 @@ use { }, }; +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const CACHE_ENTRY_SIZE: usize = std::mem::size_of::<ReadOnlyAccountCacheEntry>() + 2 * std::mem::size_of::<Pubkey>(); @@ -65,6 +68,7 @@ struct AtomicReadOnlyCacheStats { evictor_wakeup_count_productive: AtomicU64, } +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] #[derive(Debug)] pub(crate) struct ReadOnlyAccountsCache { cache: Arc>, @@ -93,6 +97,7 @@ pub(crate) struct ReadOnlyAccountsCache { } impl ReadOnlyAccountsCache { + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn new( max_data_size_lo: usize, max_data_size_hi: usize, @@ -137,6 +142,7 @@ impl ReadOnlyAccountsCache { } } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> { let (account, load_us) = measure_us!({ let mut found = None; @@ -175,6 +181,7 @@ impl ReadOnlyAccountsCache { CACHE_ENTRY_SIZE + account.data().len() } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) { let measure_store = Measure::start(""); self.highest_slot_stored.fetch_max(slot, Ordering::Release); @@ -218,6 +225,7 @@ impl ReadOnlyAccountsCache { self.remove(pubkey) } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn remove(&self, pubkey: Pubkey) -> Option<AccountSharedData> { Self::do_remove(&pubkey, &self.cache, &self.queue, &self.data_size) } diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 6304daf6002ba6..1843c53833d2c7 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -100,7 +100,8 @@ pub struct StorableAccountsCacher { /// abstract access to pubkey, account, slot, target_slot of either: /// a. (slot, &[&Pubkey, &ReadableAccount]) -/// b. (slot, &[&Pubkey, &ReadableAccount, Slot]) (we will use this later) +/// b. (slot, &[Pubkey, ReadableAccount]) +/// c. (slot, &[&Pubkey, &ReadableAccount, Slot]) (we will use this later) /// This trait avoids having to allocate redundant data when there is a duplicated slot parameter. /// All legacy callers do not have a unique slot per account to store.
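The trait and the new owned-pair impl (variant b above) follow. As a hypothetical usage sketch, assuming the trait is exported as solana_accounts_db::storable_accounts::StorableAccounts: a (Slot, &[(Pubkey, AccountSharedData)]) tuple satisfies the trait directly, with every account reporting the shared target slot:

use solana_accounts_db::storable_accounts::StorableAccounts;
use solana_sdk::{account::AccountSharedData, clock::Slot, pubkey::Pubkey};

fn example_owned_pairs() {
    let slot: Slot = 42;
    // Owned pairs: no per-account slot has to be allocated alongside the data.
    let accounts: Vec<(Pubkey, AccountSharedData)> =
        vec![(Pubkey::new_unique(), AccountSharedData::default())];
    let storable = (slot, accounts.as_slice());
    // Both the per-index slot and the target slot resolve to the shared slot.
    assert_eq!(storable.target_slot(), 42);
    assert_eq!(storable.len(), 1);
}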
pub trait StorableAccounts<'a>: Sync { @@ -165,6 +166,26 @@ impl<'a: 'b, 'b> StorableAccounts<'a> for (Slot, &'b [(&'a Pubkey, &'a AccountSh } } +impl<'a: 'b, 'b> StorableAccounts<'a> for (Slot, &'b [(Pubkey, AccountSharedData)]) { + fn account( + &self, + index: usize, + mut callback: impl for<'local> FnMut(AccountForStorage<'local>) -> Ret, + ) -> Ret { + callback((&self.1[index].0, &self.1[index].1).into()) + } + fn slot(&self, _index: usize) -> Slot { + // per-index slot is not unique per slot when per-account slot is not included in the source data + self.target_slot() + } + fn target_slot(&self) -> Slot { + self.0 + } + fn len(&self) -> usize { + self.1.len() + } +} + /// holds slices of accounts being moved FROM a common source slot to 'target_slot' pub struct StorableAccountsBySlot<'a> { target_slot: Slot, diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 57f768e2773cd5..99722c7cf6082e 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -8,7 +8,9 @@ use { solana_client::connection_cache::ConnectionCache, solana_core::{ banking_stage::BankingStage, - banking_trace::{BankingPacketBatch, BankingTracer, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT}, + banking_trace::{ + BankingPacketBatch, BankingTracer, Channels, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, + }, validator::BlockProductionMethod, }, solana_gossip::cluster_info::{ClusterInfo, Node}, @@ -440,9 +442,14 @@ fn main() { BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, ))) .unwrap(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let cluster_info = { let keypair = Arc::new(Keypair::new()); let node = Node::new_localhost_with_pubkey(&keypair.pubkey()); @@ -504,7 +511,7 @@ fn main() { timestamp(), ); non_vote_sender - .send(BankingPacketBatch::new((vec![packet_batch.clone()], None))) + .send(BankingPacketBatch::new(vec![packet_batch.clone()])) .unwrap(); } @@ -554,11 +561,6 @@ fn main() { bank = bank_forks.read().unwrap().working_bank(); insert_time.stop(); - // set cost tracker limits to MAX so it will not filter out TXs - bank.write_cost_tracker() - .unwrap() - .set_limits(u64::MAX, u64::MAX, u64::MAX); - assert!(poh_recorder.read().unwrap().bank().is_none()); poh_recorder .write() diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index cba5486fc5bcb9..f34c1e69cc0d9e 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -84,7 +84,7 @@ fn main() -> Result<()> { num_sockets = max(num_sockets, n.to_string().parse().expect("integer")); } - let num_producers: u64 = matches.value_of_t("num_producers").unwrap_or(4); + let num_producers: u64 = matches.value_of_t("num-producers").unwrap_or(4); let port = 0; let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 857ae4cd2f7e20..eb2ab2540006e8 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -78,9 +78,13 @@ fn test_bench_tps_local_cluster(config: Config) { cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000); - let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { - panic!("Could 
not create TpuClient with Quic Cache {err:?}"); - })); + let client = Arc::new( + cluster + .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey()) + .unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + }), + ); let lamports_per_account = 100; diff --git a/bench-vote/Cargo.toml b/bench-vote/Cargo.toml new file mode 100644 index 00000000000000..54fa6ec87f0014 --- /dev/null +++ b/bench-vote/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "solana-bench-vote" +publish = false +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true } +clap = { version = "3.1.5", features = ["cargo"] } +crossbeam-channel = { workspace = true } +solana-client = { workspace = true } +solana-connection-cache = { workspace = true } +solana-net-utils = { workspace = true } +solana-sdk = { workspace = true } +solana-streamer = { workspace = true } +solana-version = { workspace = true } +solana-vote-program = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-vote/src/main.rs b/bench-vote/src/main.rs new file mode 100644 index 00000000000000..998f4020139128 --- /dev/null +++ b/bench-vote/src/main.rs @@ -0,0 +1,315 @@ +#![allow(clippy::arithmetic_side_effects)] + +use { + clap::{crate_description, crate_name, Arg, Command}, + crossbeam_channel::unbounded, + solana_client::connection_cache::ConnectionCache, + solana_connection_cache::client_connection::ClientConnection, + solana_net_utils::bind_to_unspecified, + solana_sdk::{ + hash::Hash, message::Message, signature::Keypair, signer::Signer, transaction::Transaction, + }, + solana_streamer::{ + packet::PacketBatchRecycler, + streamer::{receiver, PacketBatchReceiver, StreamerReceiveStats}, + }, + solana_vote_program::{vote_instruction, vote_state::Vote}, + std::{ + cmp::max, + net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, + thread::{self, spawn, JoinHandle, Result}, + time::{Duration, Instant, SystemTime}, + }, +}; + +const SINK_REPORT_INTERVAL: Duration = Duration::from_secs(5); +const SINK_RECEIVE_TIMEOUT: Duration = Duration::from_secs(1); +const SOCKET_RECEIVE_TIMEOUT: Duration = Duration::from_secs(1); +const COALESCE_TIME: Duration = Duration::from_millis(1); + +fn sink( + exit: Arc<AtomicBool>, + received_size: Arc<AtomicUsize>, + receiver: PacketBatchReceiver, + verbose: bool, +) -> JoinHandle<()> { + spawn(move || { + let mut last_report = Instant::now(); + while !exit.load(Ordering::Relaxed) { + if let Ok(packet_batch) = receiver.recv_timeout(SINK_RECEIVE_TIMEOUT) { + received_size.fetch_add(packet_batch.len(), Ordering::Relaxed); + } + + let count = received_size.load(Ordering::Relaxed); + + if verbose && last_report.elapsed() > SINK_REPORT_INTERVAL { + println!("Received txns count: {count}"); + last_report = Instant::now(); + } + } + }) +} + +const TRANSACTIONS_PER_THREAD: u64 = 1_000_000; // Number of transactions per thread + +fn main() -> Result<()> { + let matches = Command::new(crate_name!()) + .about(crate_description!()) + .version(solana_version::version!()) + .arg( + Arg::new("num-recv-sockets") + .long("num-recv-sockets") + .value_name("NUM") + .takes_value(true) + .help("Use NUM receive sockets"), + ) + .arg( + Arg::new("num-producers") + .long("num-producers") + .value_name("NUM") + .takes_value(true) +
.help("Use this many producer threads."), + ) + .arg( + Arg::new("server-only") + .long("server-only") + .takes_value(false) + .help("Run the bench tool as a server only."), + ) + .arg( + Arg::new("client-only") + .long("client-only") + .takes_value(false) + .requires("server-address") + .help("Run the bench tool as a client only."), + ) + .arg( + Arg::with_name("server-address") + .short('n') + .long("server-address") + .value_name("HOST:PORT") + .takes_value(true) + .validator(|arg| solana_net_utils::is_host_port(arg.to_string())) + .help("The destination streamer address to which the client will send transactions to"), + ) + .arg( + Arg::new("use-connection-cache") + .long("use-connection-cache") + .takes_value(false) + .help("Use this many producer threads."), + ) + .arg( + Arg::new("verbose") + .long("verbose") + .takes_value(false) + .help("Show verbose messages."), + ) + .get_matches(); + + let mut num_sockets = 1usize; + if let Some(n) = matches.value_of("num-recv-sockets") { + num_sockets = max(num_sockets, n.to_string().parse().expect("integer")); + } + + let num_producers: u64 = matches.value_of_t("num-producers").unwrap_or(4); + + let use_connection_cache = matches.is_present("use-connection-cache"); + + let server_only = matches.is_present("server-only"); + let client_only = matches.is_present("client-only"); + let verbose = matches.is_present("verbose"); + + let destination = matches.is_present("server-address").then(|| { + let addr = matches + .value_of("server-address") + .expect("Destination must be set when --client-only is used"); + solana_net_utils::parse_host_port(addr).expect("Expecting a valid server address") + }); + + let port = destination.map_or(0, |addr| addr.port()); + let ip_addr = destination.map_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED), |addr| addr.ip()); + + let (exit, read_threads, sink_threads, destination) = if !client_only { + let exit = Arc::new(AtomicBool::new(false)); + + let mut read_channels = Vec::new(); + let mut read_threads = Vec::new(); + let recycler = PacketBatchRecycler::default(); + let (port, read_sockets) = solana_net_utils::multi_bind_in_range( + ip_addr, + (port, port + num_sockets as u16), + num_sockets, + ) + .unwrap(); + let stats = Arc::new(StreamerReceiveStats::new("bench-streamer-test")); + for read in read_sockets { + read.set_read_timeout(Some(SOCKET_RECEIVE_TIMEOUT)).unwrap(); + + let (s_reader, r_reader) = unbounded(); + read_channels.push(r_reader); + read_threads.push(receiver( + "solRcvrBenStrmr".to_string(), + Arc::new(read), + exit.clone(), + s_reader, + recycler.clone(), + stats.clone(), + COALESCE_TIME, // coalesce + true, // use_pinned_memory + None, // in_vote_only_mode + false, // is_staked_service + )); + } + + let received_size = Arc::new(AtomicUsize::new(0)); + let sink_threads: Vec<_> = read_channels + .into_iter() + .map(|r_reader| sink(exit.clone(), received_size.clone(), r_reader, verbose)) + .collect(); + + let destination = SocketAddr::new(ip_addr, port); + println!("Running server at {destination:?}"); + ( + Some(exit), + Some(read_threads), + Some(sink_threads), + destination, + ) + } else { + (None, None, None, destination.unwrap()) + }; + + let start = SystemTime::now(); + + let producer_threads = + (!server_only).then(|| producer(destination, num_producers, use_connection_cache, verbose)); + + producer_threads + .into_iter() + .flatten() + .try_for_each(JoinHandle::join)?; + + if !server_only { + if let Some(exit) = exit { + exit.store(true, Ordering::Relaxed); + } + } else { + println!("To stop the 
server, please press ^C"); + } + + read_threads + .into_iter() + .flatten() + .try_for_each(JoinHandle::join)?; + sink_threads + .into_iter() + .flatten() + .try_for_each(JoinHandle::join)?; + + if !server_only { + let elapsed = start.elapsed().unwrap(); + let ftime = elapsed.as_nanos() as f64 / 1_000_000_000.0; + let fcount = (TRANSACTIONS_PER_THREAD * num_producers) as f64; + + println!( + "Performance: {:?}/s, count: {fcount}, time in seconds: {ftime}", + fcount / ftime + ); + } + Ok(()) +} + +#[derive(Clone)] +enum Transporter { + Cache(Arc<ConnectionCache>), + DirectSocket(Arc<UdpSocket>), +} + +fn producer( + sock: SocketAddr, + num_producers: u64, + use_connection_cache: bool, + verbose: bool, +) -> Vec<JoinHandle<()>> { + println!("Running clients against {sock:?}"); + let transporter = if use_connection_cache { + Transporter::Cache(Arc::new(ConnectionCache::with_udp( + "connection_cache_vote_udp", + 1, // connection_pool_size + ))) + } else { + Transporter::DirectSocket(Arc::new(bind_to_unspecified().unwrap())) + }; + + let mut handles = vec![]; + + let current_slot: u64 = 0; + + let identity_keypair = Keypair::new(); // Replace with loaded keypair + + for _i in 0..num_producers { + let transporter = transporter.clone(); + let identity_keypair = identity_keypair.insecure_clone(); + handles.push(thread::spawn(move || { + // Generate and send transactions + for _j in 0..TRANSACTIONS_PER_THREAD { + // Create a vote instruction + let vote = Vote { + slots: vec![current_slot], // Voting for the current slot + hash: Hash::new_unique(), + timestamp: None, // Optional timestamp + }; + + let vote_instruction = vote_instruction::vote( + &identity_keypair.pubkey(), + &identity_keypair.pubkey(), + vote, + ); + + // Build the transaction + let message = Message::new(&[vote_instruction], Some(&identity_keypair.pubkey())); + + let recent_blockhash = Hash::new_unique(); + let transaction = Transaction::new(&[&identity_keypair], message, recent_blockhash); + + let serialized_transaction = bincode::serialize(&transaction).unwrap(); + + match &transporter { + Transporter::Cache(cache) => { + let connection = cache.get_connection(&sock); + + match connection.send_data(&serialized_transaction) { + Ok(_) => { + if verbose { + println!("Sent transaction successfully"); + } + } + Err(ex) => { + println!("Error sending transaction {ex:?}"); + } + } + } + Transporter::DirectSocket(socket) => { + match socket.send_to(&serialized_transaction, sock) { + Ok(_) => { + if verbose { + println!( + "Sent transaction via direct socket successfully {sock:?}" + ); + } + } + Err(ex) => { + println!("Error sending transaction {ex:?}"); + } + } + } + } + } + })); + } + handles +} diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml index 9c0a1e0753a799..986ef59c9fcc7d 100644 --- a/builtins-default-costs/Cargo.toml +++ b/builtins-default-costs/Cargo.toml @@ -35,6 +35,7 @@ name = "solana_builtins_default_costs" [dev-dependencies] rand = "0.8.5" +static_assertions = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -44,6 +45,7 @@ frozen-abi = [ "dep:solana-frozen-abi", "solana-vote-program/frozen-abi", ] +dev-context-only-utils = [] [lints] workspace = true diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs index 3150e799c42969..a1c4000a75499d 100644 --- a/builtins-default-costs/src/lib.rs +++ b/builtins-default-costs/src/lib.rs @@ -12,15 +12,68 @@ use { }, }; +#[derive(Clone)] +pub struct MigratingBuiltinCost { + native_cost: u64, + core_bpf_migration_feature:
Pubkey, + // Encodes this entry's position explicitly; its value must match this + // object's position in MIGRATING_BUILTINS_COSTS, otherwise the const + // validation `validate_position(MIGRATING_BUILTINS_COSTS)` will fail at + // compile time. + position: usize, +} + +#[derive(Clone)] +pub struct NotMigratingBuiltinCost { + native_cost: u64, +} + /// DEVELOPER: when a builtin is migrated to sbpf, please add its corresponding -/// migration feature ID to BUILTIN_INSTRUCTION_COSTS, so the builtin's default -/// cost can be determined properly based on feature status. +/// migration feature ID to BUILTIN_INSTRUCTION_COSTS, and move it from +/// NON_MIGRATING_BUILTINS_COSTS to MIGRATING_BUILTINS_COSTS, so the builtin's +/// default cost can be determined properly based on feature status. /// When migration completed, eg the feature gate is enabled everywhere, please -/// remove that builtin entry from BUILTIN_INSTRUCTION_COSTS. +/// remove that builtin entry from MIGRATING_BUILTINS_COSTS. #[derive(Clone)] -struct BuiltinCost { - native_cost: u64, - core_bpf_migration_feature: Option<Pubkey>, +pub enum BuiltinCost { + Migrating(MigratingBuiltinCost), + NotMigrating(NotMigratingBuiltinCost), +} + +impl BuiltinCost { + pub fn native_cost(&self) -> u64 { + match self { + BuiltinCost::Migrating(MigratingBuiltinCost { native_cost, .. }) => *native_cost, + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost }) => *native_cost, + } + } + + pub fn core_bpf_migration_feature(&self) -> Option<&Pubkey> { + match self { + BuiltinCost::Migrating(MigratingBuiltinCost { + core_bpf_migration_feature, + .. + }) => Some(core_bpf_migration_feature), + BuiltinCost::NotMigrating(_) => None, + } + } + + pub fn position(&self) -> Option<usize> { + match self { + BuiltinCost::Migrating(MigratingBuiltinCost { position, .. }) => Some(*position), + BuiltinCost::NotMigrating(_) => None, + } + } + + fn has_migrated(&self, feature_set: &FeatureSet) -> bool { + match self { + BuiltinCost::Migrating(MigratingBuiltinCost { + core_bpf_migration_feature, + .. + }) => feature_set.is_active(core_bpf_migration_feature), + BuiltinCost::NotMigrating(_) => false, + } + } } lazy_static! { @@ -33,100 +86,110 @@ lazy_static! { /// calculate the cost of a transaction which is used in replay to enforce /// block cost limits as of /// https://github.com/solana-labs/solana/issues/29595. - static ref BUILTIN_INSTRUCTION_COSTS: AHashMap<Pubkey, BuiltinCost> = [ + static ref BUILTIN_INSTRUCTION_COSTS: AHashMap<Pubkey, BuiltinCost> = + MIGRATING_BUILTINS_COSTS + .iter() + .chain(NON_MIGRATING_BUILTINS_COSTS.iter()) + .cloned() + .collect(); + // DO NOT ADD MORE ENTRIES TO THIS MAP +} + +/// DEVELOPER WARNING: please do not add new entries to MIGRATING_BUILTINS_COSTS or +/// NON_MIGRATING_BUILTINS_COSTS; doing so will modify BUILTIN_INSTRUCTION_COSTS and +/// therefore cause consensus failure. However, when a builtin starts being migrated to +/// core bpf, it MUST be moved from NON_MIGRATING_BUILTINS_COSTS to +/// MIGRATING_BUILTINS_COSTS, with its `core_bpf_migration_feature` filled in correctly.
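+/// For example, a builtin beginning its migration would be appended to
+/// MIGRATING_BUILTINS_COSTS as (hypothetical entry; the program id, cost, and
+/// feature id below are illustrative only):
+/// (
+///     my_program::id(),
+///     BuiltinCost::Migrating(MigratingBuiltinCost {
+///         native_cost: my_program::DEFAULT_COMPUTE_UNITS,
+///         core_bpf_migration_feature: feature_set::migrate_my_program_to_core_bpf::id(),
+///         position: 3, // must equal this entry's index in MIGRATING_BUILTINS_COSTS
+///     }),
+/// ),
+/// so that the compile-time check `validate_position` keeps passing.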
+/// +#[allow(dead_code)] +const TOTAL_COUNT_BUILTS: usize = 12; +#[cfg(test)] +static_assertions::const_assert_eq!( + MIGRATING_BUILTINS_COSTS.len() + NON_MIGRATING_BUILTINS_COSTS.len(), + TOTAL_COUNT_BUILTS +); + +pub const MIGRATING_BUILTINS_COSTS: &[(Pubkey, BuiltinCost)] = &[ ( stake::id(), - BuiltinCost { + BuiltinCost::Migrating(MigratingBuiltinCost { native_cost: solana_stake_program::stake_instruction::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: Some(feature_set::migrate_stake_program_to_core_bpf::id()), - }, + core_bpf_migration_feature: feature_set::migrate_stake_program_to_core_bpf::id(), + position: 0, + }), ), ( config::id(), - BuiltinCost { + BuiltinCost::Migrating(MigratingBuiltinCost { native_cost: solana_config_program::config_processor::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: Some(feature_set::migrate_config_program_to_core_bpf::id()), - }, + core_bpf_migration_feature: feature_set::migrate_config_program_to_core_bpf::id(), + position: 1, + }), ), + ( + address_lookup_table::id(), + BuiltinCost::Migrating(MigratingBuiltinCost { + native_cost: solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS, + core_bpf_migration_feature: + feature_set::migrate_address_lookup_table_program_to_core_bpf::id(), + position: 2, + }), + ), +]; + +pub const NON_MIGRATING_BUILTINS_COSTS: &[(Pubkey, BuiltinCost)] = &[ ( vote::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), ( system_program::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_system_program::system_processor::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), ( compute_budget::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_compute_budget_program::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, - ), - ( - address_lookup_table::id(), - BuiltinCost { - native_cost: solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: Some( - feature_set::migrate_address_lookup_table_program_to_core_bpf::id(), - ), - }, + }), ), ( bpf_loader_upgradeable::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_bpf_loader_program::UPGRADEABLE_LOADER_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), ( bpf_loader_deprecated::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_bpf_loader_program::DEPRECATED_LOADER_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), ( bpf_loader::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_bpf_loader_program::DEFAULT_LOADER_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), ( loader_v4::id(), - BuiltinCost { + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: solana_loader_v4_program::DEFAULT_COMPUTE_UNITS, - core_bpf_migration_feature: None, - }, + }), ), // Note: These are precompile, run directly in bank during sanitizing; ( secp256k1_program::id(), - BuiltinCost { - native_cost: 0, - core_bpf_migration_feature: None, - }, + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: 0 }), ), ( ed25519_program::id(), - BuiltinCost { - native_cost: 0, - core_bpf_migration_feature: None, - }, + BuiltinCost::NotMigrating(NotMigratingBuiltinCost { native_cost: 0 }), 
), - // DO NOT ADD MORE ENTRIES TO THIS MAP - ] - .iter() - .cloned() - .collect(); -} +]; lazy_static! { /// A table of 256 booleans indicates whether the first `u8` of a Pubkey exists in @@ -148,28 +211,84 @@ pub fn get_builtin_instruction_cost<'a>( ) -> Option { BUILTIN_INSTRUCTION_COSTS .get(program_id) - .filter( - // Returns true if builtin program id has no core_bpf_migration_feature or feature is not activated; - // otherwise returns false because it's not considered as builtin - |builtin_cost| -> bool { - builtin_cost - .core_bpf_migration_feature - .map(|feature_id| !feature_set.is_active(&feature_id)) - .unwrap_or(true) - }, - ) - .map(|builtin_cost| builtin_cost.native_cost) + .filter(|builtin_cost| !builtin_cost.has_migrated(feature_set)) + .map(|builtin_cost| builtin_cost.native_cost()) } -#[inline] -pub fn is_builtin_program(program_id: &Pubkey) -> bool { - BUILTIN_INSTRUCTION_COSTS.contains_key(program_id) +pub enum BuiltinMigrationFeatureIndex { + NotBuiltin, + BuiltinNoMigrationFeature, + BuiltinWithMigrationFeature(usize), +} + +pub fn get_builtin_migration_feature_index(program_id: &Pubkey) -> BuiltinMigrationFeatureIndex { + BUILTIN_INSTRUCTION_COSTS.get(program_id).map_or( + BuiltinMigrationFeatureIndex::NotBuiltin, + |builtin_cost| { + builtin_cost.position().map_or( + BuiltinMigrationFeatureIndex::BuiltinNoMigrationFeature, + BuiltinMigrationFeatureIndex::BuiltinWithMigrationFeature, + ) + }, + ) +} + +/// const function validates `position` correctness at compile time. +#[allow(dead_code)] +const fn validate_position(migrating_builtins: &[(Pubkey, BuiltinCost)]) { + let mut index = 0; + while index < migrating_builtins.len() { + match migrating_builtins[index].1 { + BuiltinCost::Migrating(MigratingBuiltinCost { position, .. 
}) => assert!( position == index, "migration feature must exist and be at the correct position" ), + BuiltinCost::NotMigrating(_) => { + panic!("migration feature must exist and be at the correct position") + } + } + index += 1; + } +} +const _: () = validate_position(MIGRATING_BUILTINS_COSTS); + +/// Helper function to return a reference to the migration feature Pubkey at +/// position `index` in MIGRATING_BUILTINS_COSTS +pub fn get_migration_feature_id(index: usize) -> &'static Pubkey { + MIGRATING_BUILTINS_COSTS + .get(index) + .expect("valid index of MIGRATING_BUILTINS_COSTS") + .1 + .core_bpf_migration_feature() + .expect("migrating builtin") +} + +#[cfg(feature = "dev-context-only-utils")] +pub fn get_migration_feature_position(feature_id: &Pubkey) -> usize { + MIGRATING_BUILTINS_COSTS + .iter() + .position(|(_, c)| c.core_bpf_migration_feature().expect("migrating builtin") == feature_id) + .unwrap() } #[cfg(test)] mod test { use super::*; + #[test] + fn test_const_builtin_cost_arrays() { + // sanity check to make sure built-ins are declared in the correct array + assert!(MIGRATING_BUILTINS_COSTS + .iter() + .enumerate() + .all(|(index, (_, c))| { + c.core_bpf_migration_feature().is_some() && c.position() == Some(index) + })); + assert!(NON_MIGRATING_BUILTINS_COSTS + .iter() + .all(|(_, c)| c.core_bpf_migration_feature().is_none())); + } + #[test] fn test_get_builtin_instruction_cost() { // use native cost if no migration planned @@ -181,15 +300,11 @@ mod test { // use native cost if migration is planned but not activated assert_eq!( Some(solana_stake_program::stake_instruction::DEFAULT_COMPUTE_UNITS), - get_builtin_instruction_cost(&solana_stake_program::id(), &FeatureSet::default()) + get_builtin_instruction_cost(&stake::id(), &FeatureSet::default()) ); // None if migration is planned and activated, in which case, it's no longer builtin - assert!(get_builtin_instruction_cost( - &solana_stake_program::id(), - &FeatureSet::all_enabled() - ) - .is_none()); + assert!(get_builtin_instruction_cost(&stake::id(), &FeatureSet::all_enabled()).is_none()); // None if not builtin assert!( @@ -200,4 +315,64 @@ mod test { .is_none() ); } + + #[test] + fn test_get_builtin_migration_feature_index() { + assert!(matches!( + get_builtin_migration_feature_index(&Pubkey::new_unique()), + BuiltinMigrationFeatureIndex::NotBuiltin + )); + assert!(matches!( + get_builtin_migration_feature_index(&compute_budget::id()), + BuiltinMigrationFeatureIndex::BuiltinNoMigrationFeature, + )); + let feature_index = get_builtin_migration_feature_index(&stake::id()); + assert!(matches!( + feature_index,
BuiltinMigrationFeatureIndex::BuiltinWithMigrationFeature(_) + )); + let BuiltinMigrationFeatureIndex::BuiltinWithMigrationFeature(feature_index) = + feature_index + else { + panic!("expect migrating builtin") + }; + assert_eq!( + get_migration_feature_id(feature_index), + &feature_set::migrate_address_lookup_table_program_to_core_bpf::id() + ); + } + + #[test] + #[should_panic(expected = "valid index of MIGRATING_BUILTINS_COSTS")] + fn test_get_migration_feature_id_invalid_index() { + let _ = get_migration_feature_id(MIGRATING_BUILTINS_COSTS.len() + 1); + } } diff --git a/ci/README.md b/ci/README.md index 45ebd39e1d4a94..0d91fa2d60b996 100644 --- a/ci/README.md +++ b/ci/README.md @@ -122,7 +122,7 @@ Creating a "queue=cuda" agent follows the same process but additionally: 1. Run `az vm deallocate --resource-group ci --name XYZ` 1. Run `az vm generalize --resource-group ci --name XYZ` 1. Run `az image create --resource-group ci --source XYZ --name boilerplate` -1. Goto the `ci` resource group in the Azure portal and remove all resources +1. Go to the `ci` resource group in the Azure portal and remove all resources with the XYZ name in them ## Buildkite AWS CloudFormation Setup diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 3c5c8372aeaeb7..69ff4d0d9263ec 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -48,6 +48,7 @@ solana-remote-wallet = { workspace = true, features = ["default"] } solana-rpc-client = { workspace = true, features = ["default"] } solana-rpc-client-api = { workspace = true } solana-rpc-client-nonce-utils = { workspace = true, features = ["clap"] } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } solana-tps-client = { workspace = true } @@ -56,7 +57,6 @@ solana-transaction-status = { workspace = true } solana-udp-client = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } -solana_rbpf = { workspace = true } spl-memo = { workspace = true, features = ["no-entrypoint"] } thiserror = { workspace = true } tiny-bip39 = { workspace = true } diff --git a/cli/src/program.rs b/cli/src/program.rs index bed71f96470474..577ba91950c63b 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -42,7 +42,6 @@ use { solana_compute_budget::compute_budget::ComputeBudget, solana_feature_set::{FeatureSet, FEATURE_NAMES}, solana_program_runtime::invoke_context::InvokeContext, - solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -52,6 +51,7 @@ use { request::MAX_MULTIPLE_ACCOUNTS, }, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, + solana_sbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_sdk::{ account::Account, account_utils::StateMut, diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 37093db5d73b91..026c096b201839 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -26,7 +26,6 @@ use { }, solana_compute_budget::compute_budget::ComputeBudget, solana_program_runtime::invoke_context::InvokeContext, - solana_rbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -34,6 +33,7 @@ use { filter::{Memcmp, RpcFilterType}, request::MAX_MULTIPLE_ACCOUNTS, }, + solana_sbpf::{elf::Executable, verifier::RequisiteVerifier}, solana_sdk::{ account::Account, 
feature_set::{FeatureSet, FEATURE_NAMES}, diff --git a/compute-budget-instruction/Cargo.toml b/compute-budget-instruction/Cargo.toml index b8daccad80d7c5..8e5bdd0b1c9bf7 100644 --- a/compute-budget-instruction/Cargo.toml +++ b/compute-budget-instruction/Cargo.toml @@ -26,6 +26,7 @@ name = "solana_compute_budget_instruction" bincode = { workspace = true } criterion = { workspace = true } rand = { workspace = true } +solana-builtins-default-costs = { workspace = true, features = ["dev-context-only-utils"] } solana-program = { workspace = true } [package.metadata.docs.rs] diff --git a/compute-budget-instruction/src/builtin_programs_filter.rs b/compute-budget-instruction/src/builtin_programs_filter.rs index 6dd81844b5df59..1525dd1f2cfc61 100644 --- a/compute-budget-instruction/src/builtin_programs_filter.rs +++ b/compute-budget-instruction/src/builtin_programs_filter.rs @@ -1,5 +1,7 @@ use { - solana_builtins_default_costs::{is_builtin_program, MAYBE_BUILTIN_KEY}, + solana_builtins_default_costs::{ + get_builtin_migration_feature_index, BuiltinMigrationFeatureIndex, MAYBE_BUILTIN_KEY, + }, solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey}, }; @@ -10,6 +12,12 @@ pub const FILTER_SIZE: u8 = (PACKET_DATA_SIZE / core::mem::size_of::<Pubkey>()) pub(crate) enum ProgramKind { NotBuiltin, Builtin, + // Builtin program that may be in the process of being migrated to core bpf; + // if core_bpf_migration_feature is activated, the migration has completed + // and it should no longer be considered a builtin + MigratingBuiltin { + core_bpf_migration_feature_index: usize, + }, } pub(crate) struct BuiltinProgramsFilter { @@ -40,17 +48,24 @@ impl BuiltinProgramsFilter { return ProgramKind::NotBuiltin; } - if is_builtin_program(program_id) { - ProgramKind::Builtin - } else { - ProgramKind::NotBuiltin + match get_builtin_migration_feature_index(program_id) { + BuiltinMigrationFeatureIndex::NotBuiltin => ProgramKind::NotBuiltin, + BuiltinMigrationFeatureIndex::BuiltinNoMigrationFeature => ProgramKind::Builtin, + BuiltinMigrationFeatureIndex::BuiltinWithMigrationFeature( + core_bpf_migration_feature_index, + ) => ProgramKind::MigratingBuiltin { + core_bpf_migration_feature_index, + }, } } } #[cfg(test)] mod test { - use super::*; + use { + super::*, solana_builtins_default_costs::get_migration_feature_position, + solana_sdk::feature_set, + }; const DUMMY_PROGRAM_ID: &str = "dummmy1111111111111111111111111111111111111"; @@ -92,6 +107,32 @@ mod test { test_store.get_program_kind(index, &solana_sdk::compute_budget::id()), ProgramKind::Builtin, ); + + // migrating builtins + for (migrating_builtin_pubkey, migration_feature_id) in [ + ( + solana_sdk::stake::program::id(), + feature_set::migrate_stake_program_to_core_bpf::id(), + ), + ( + solana_sdk::config::program::id(), + feature_set::migrate_config_program_to_core_bpf::id(), + ), + ( + solana_sdk::address_lookup_table::program::id(), + feature_set::migrate_address_lookup_table_program_to_core_bpf::id(), + ), + ] { + index += 1; + assert_eq!( + test_store.get_program_kind(index, &migrating_builtin_pubkey), + ProgramKind::MigratingBuiltin { + core_bpf_migration_feature_index: get_migration_feature_position( + &migration_feature_id + ), + } + ); + } } #[test] diff --git a/compute-budget-instruction/src/compute_budget_instruction_details.rs b/compute-budget-instruction/src/compute_budget_instruction_details.rs index dba59b8f343d13..e0ef6341694167 100644 --- a/compute-budget-instruction/src/compute_budget_instruction_details.rs +++
b/compute-budget-instruction/src/compute_budget_instruction_details.rs @@ -3,6 +3,7 @@ use { builtin_programs_filter::{BuiltinProgramsFilter, ProgramKind}, compute_budget_program_id_filter::ComputeBudgetProgramIdFilter, }, + solana_builtins_default_costs::{get_migration_feature_id, MIGRATING_BUILTINS_COSTS}, solana_compute_budget::compute_budget_limits::*, solana_sdk::{ borsh1::try_from_slice_unchecked, @@ -17,6 +18,24 @@ use { std::num::NonZeroU32, }; +#[cfg_attr(test, derive(Eq, PartialEq))] +#[cfg_attr(feature = "dev-context-only-utils", derive(Clone))] +#[derive(Debug)] +struct MigrationBuiltinFeatureCounter { + // The vector of counters, matching the size of the static vector MIGRATION_FEATURE_IDS, + // each counter representing the number of times its corresponding feature ID is + // referenced in this transaction. + migrating_builtin: [u16; MIGRATING_BUILTINS_COSTS.len()], +} + +impl Default for MigrationBuiltinFeatureCounter { + fn default() -> Self { + Self { + migrating_builtin: [0; MIGRATING_BUILTINS_COSTS.len()], + } + } +} + #[cfg_attr(test, derive(Eq, PartialEq))] #[cfg_attr(feature = "dev-context-only-utils", derive(Clone))] #[derive(Default, Debug)] @@ -29,8 +48,9 @@ pub struct ComputeBudgetInstructionDetails { requested_loaded_accounts_data_size_limit: Option<(u8, u32)>, num_non_compute_budget_instructions: u16, // Additional builtin program counters - num_builtin_instructions: u16, + num_non_migratable_builtin_instructions: u16, num_non_builtin_instructions: u16, + migrating_builtin_feature_counters: MigrationBuiltinFeatureCounter, } impl ComputeBudgetInstructionDetails { @@ -61,7 +81,8 @@ impl ComputeBudgetInstructionDetails { match filter.get_program_kind(instruction.program_id_index as usize, program_id) { ProgramKind::Builtin => { saturating_add_assign!( - compute_budget_instruction_details.num_builtin_instructions, + compute_budget_instruction_details + .num_non_migratable_builtin_instructions, 1 ); } @@ -71,6 +92,20 @@ impl ComputeBudgetInstructionDetails { 1 ); } + ProgramKind::MigratingBuiltin { + core_bpf_migration_feature_index, + } => { + saturating_add_assign!( + *compute_budget_instruction_details + .migrating_builtin_feature_counters + .migrating_builtin + .get_mut(core_bpf_migration_feature_index) + .expect( + "migrating feature index within range of MIGRATION_FEATURE_IDS" + ), + 1 + ); + } } } } @@ -175,10 +210,26 @@ impl ComputeBudgetInstructionDetails { fn calculate_default_compute_unit_limit(&self, feature_set: &FeatureSet) -> u32 { if feature_set.is_active(&feature_set::reserve_minimal_cus_for_builtin_instructions::id()) { - u32::from(self.num_builtin_instructions) + // evaluate if any builtin has migrated with feature_set + let (num_migrated, num_not_migrated) = self + .migrating_builtin_feature_counters + .migrating_builtin + .iter() + .enumerate() + .fold((0, 0), |(migrated, not_migrated), (index, count)| { + if *count > 0 && feature_set.is_active(get_migration_feature_id(index)) { + (migrated + count, not_migrated) + } else { + (migrated, not_migrated + count) + } + }); + + u32::from(self.num_non_migratable_builtin_instructions) + .saturating_add(u32::from(num_not_migrated)) .saturating_mul(MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT) .saturating_add( u32::from(self.num_non_builtin_instructions) + .saturating_add(u32::from(num_migrated)) .saturating_mul(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT), ) } else { @@ -192,6 +243,7 @@ impl ComputeBudgetInstructionDetails { mod test { use { super::*, + 
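The fixed-size counter array above (sized by `MIGRATING_BUILTINS_COSTS.len()`) lets `ComputeBudgetInstructionDetails` record hits on migrating builtins without deciding, at parse time, whether each builtin has already migrated; the fold in `calculate_default_compute_unit_limit` resolves that against the live `FeatureSet`. A self-contained sketch of the same arithmetic, assuming the 3K/200K values that the test comments below attach to MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT and DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT; `feature_active(index)` stands in for `feature_set.is_active(get_migration_feature_id(index))`:

fn default_cu_limit(
    non_migratable_builtins: u16,
    non_builtins: u16,
    migrating_counts: &[u16],
    feature_active: impl Fn(usize) -> bool,
) -> u32 {
    const BUILTIN_CU: u32 = 3_000; // stand-in for MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT
    const DEFAULT_CU: u32 = 200_000; // stand-in for DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT
    let (migrated, not_migrated) = migrating_counts.iter().enumerate().fold(
        (0u32, 0u32),
        |(migrated, not_migrated), (index, count)| {
            if *count > 0 && feature_active(index) {
                // Migration feature active: these instructions now run as BPF.
                (migrated + u32::from(*count), not_migrated)
            } else {
                (migrated, not_migrated + u32::from(*count))
            }
        },
    );
    (u32::from(non_migratable_builtins) + not_migrated) * BUILTIN_CU
        + (u32::from(non_builtins) + migrated) * DEFAULT_CU
}

fn main() {
    // One plain BPF ix plus one stake ix, stake migration not yet active:
    assert_eq!(default_cu_limit(0, 1, &[1], |_| false), 203_000);
    // The same transaction after the stake migration feature activates:
    assert_eq!(default_cu_limit(0, 1, &[1], |_| true), 400_000);
}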
solana_builtins_default_costs::get_migration_feature_position, solana_sdk::{ instruction::Instruction, message::Message, @@ -221,7 +273,7 @@ mod test { let expected_details = Ok(ComputeBudgetInstructionDetails { requested_heap_size: Some((1, 40 * 1024)), num_non_compute_budget_instructions: 2, - num_builtin_instructions: 1, + num_non_migratable_builtin_instructions: 1, num_non_builtin_instructions: 2, ..ComputeBudgetInstructionDetails::default() }); @@ -279,7 +331,7 @@ mod test { let expected_details = Ok(ComputeBudgetInstructionDetails { requested_compute_unit_price: Some((1, u64::MAX)), num_non_compute_budget_instructions: 2, - num_builtin_instructions: 1, + num_non_migratable_builtin_instructions: 1, num_non_builtin_instructions: 2, ..ComputeBudgetInstructionDetails::default() }); @@ -309,7 +361,7 @@ mod test { let expected_details = Ok(ComputeBudgetInstructionDetails { requested_loaded_accounts_data_size_limit: Some((1, u32::MAX)), num_non_compute_budget_instructions: 2, - num_builtin_instructions: 1, + num_non_migratable_builtin_instructions: 1, num_non_builtin_instructions: 2, ..ComputeBudgetInstructionDetails::default() }); @@ -336,7 +388,7 @@ mod test { let mut feature_set = FeatureSet::default(); let ComputeBudgetInstructionDetails { num_non_compute_budget_instructions, - num_builtin_instructions, + num_non_migratable_builtin_instructions, num_non_builtin_instructions, .. } = *instruction_details; @@ -346,7 +398,8 @@ mod test { 0, ); u32::from(num_non_builtin_instructions) * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - + u32::from(num_builtin_instructions) * MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT + + u32::from(num_non_migratable_builtin_instructions) + * MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT } else { u32::from(num_non_compute_budget_instructions) * DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT }; @@ -370,7 +423,7 @@ mod test { // no compute-budget instructions, all default ComputeBudgetLimits except cu-limit let instruction_details = ComputeBudgetInstructionDetails { num_non_compute_budget_instructions: 4, - num_builtin_instructions: 1, + num_non_migratable_builtin_instructions: 1, num_non_builtin_instructions: 3, ..ComputeBudgetInstructionDetails::default() }; @@ -521,4 +574,87 @@ mod test { ); } } + + #[test] + fn test_builtin_program_migration() { + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + solana_sdk::stake::instruction::delegate_stake( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + ), + ]); + let feature_id_index = + get_migration_feature_position(&feature_set::migrate_stake_program_to_core_bpf::id()); + let mut expected_details = ComputeBudgetInstructionDetails { + num_non_compute_budget_instructions: 2, + num_non_builtin_instructions: 1, + ..ComputeBudgetInstructionDetails::default() + }; + expected_details + .migrating_builtin_feature_counters + .migrating_builtin[feature_id_index] = 1; + let expected_details = Ok(expected_details); + let details = + ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)); + assert_eq!(details, expected_details); + let details = details.unwrap(); + + // reserve_minimal_cus_for_builtin_instructions: false; + // migrate_stake_program_to_core_bpf: false; + // expect: 1 bpf ix, 1 non-compute-budget builtin, cu-limit = 2 * 200K + let mut feature_set = FeatureSet::default(); + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + 
compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 2, + ..ComputeBudgetLimits::default() + }) + ); + + // reserve_minimal_cus_for_builtin_instructions: true; + // migrate_stake_program_to_core_bpf: false; + // expect: 1 bpf ix, 1 non-compute-budget builtin, cu-limit = 200K + 3K + feature_set.activate( + &feature_set::reserve_minimal_cus_for_builtin_instructions::id(), + 0, + ); + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + + MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + + // reserve_minimal_cus_for_builtin_instructions: true; + // migrate_stake_program_to_core_bpf: true; + // expect: 2 bpf ix, cu-limit = 2 * 200K + feature_set.activate(&feature_set::migrate_stake_program_to_core_bpf::id(), 0); + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 2, + ..ComputeBudgetLimits::default() + }) + ); + + // reserve_minimal_cus_for_builtin_instructions: false; + // migrate_stake_program_to_core_bpf: true (still active from above); + // expect: 2 bpf ix, cu-limit = 2 * 200K + feature_set.deactivate(&feature_set::reserve_minimal_cus_for_builtin_instructions::id()); + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 2, + ..ComputeBudgetLimits::default() + }) + ); + } } diff --git a/core/Cargo.toml b/core/Cargo.toml index bba797068b58f7..0143180ced5b4d 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -120,6 +120,7 @@ solana-program-runtime = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } solana-system-program = { workspace = true } +solana-unified-scheduler-logic = { workspace = true } solana-unified-scheduler-pool = { workspace = true, features = [ "dev-context-only-utils", ] } diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 0f449719ce34cb..5d764f4b7c1d44 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -2,7 +2,7 @@ #![feature(test)] use { - solana_core::validator::BlockProductionMethod, + solana_core::{banking_trace::Channels, validator::BlockProductionMethod}, solana_vote_program::{vote_state::TowerSync, vote_transaction::new_tower_sync_transaction}, }; @@ -211,9 +211,14 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { genesis_config.ticks_per_slot = 10_000; let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let mut bank = Bank::new_for_benches(&genesis_config); // Allow arbitrary transaction processing time for the purposes of this bench @@ -319,16 +324,14 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { let mut sent = 0; if let 
Some(vote_packets) = &vote_packets { tpu_vote_sender - .send(BankingPacketBatch::new(( + .send(BankingPacketBatch::new( vote_packets[start..start + chunk_len].to_vec(), - None, - ))) + )) .unwrap(); gossip_vote_sender - .send(BankingPacketBatch::new(( + .send(BankingPacketBatch::new( vote_packets[start..start + chunk_len].to_vec(), - None, - ))) + )) .unwrap(); } for v in verified[start..start + chunk_len].chunks(chunk_len / num_threads) { @@ -343,7 +346,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { sent += xv.len(); } non_vote_sender - .send(BankingPacketBatch::new((v.to_vec(), None))) + .send(BankingPacketBatch::new(v.to_vec())) .unwrap(); } diff --git a/core/benches/banking_trace.rs b/core/benches/banking_trace.rs index fb93deebc17ae2..66bfc84c630436 100644 --- a/core/benches/banking_trace.rs +++ b/core/benches/banking_trace.rs @@ -7,7 +7,7 @@ use { for_test::{ drop_and_clean_temp_dir_unless_suppressed, sample_packet_batch, terminate_tracer, }, - receiving_loop_with_minimized_sender_overhead, BankingPacketBatch, BankingTracer, + receiving_loop_with_minimized_sender_overhead, BankingPacketBatch, BankingTracer, Channels, TraceError, TracerThreadResult, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, }, std::{ @@ -35,7 +35,11 @@ fn black_box_packet_batch(packet_batch: BankingPacketBatch) -> TracerThreadResul fn bench_banking_tracer_main_thread_overhead_noop_baseline(bencher: &mut Bencher) { let exit = Arc::::default(); let tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + .. + } = tracer.create_channels(false); let exit_for_dummy_thread = exit.clone(); let dummy_main_thread = thread::spawn(move || { @@ -64,7 +68,11 @@ fn bench_banking_tracer_main_thread_overhead_under_peak_write(bencher: &mut Benc BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, ))) .unwrap(); - let (non_vote_sender, non_vote_receiver) = tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + .. + } = tracer.create_channels(false); let exit_for_dummy_thread = exit.clone(); let dummy_main_thread = thread::spawn(move || { @@ -101,7 +109,11 @@ fn bench_banking_tracer_main_thread_overhead_under_sustained_write(bencher: &mut 1024 * 1024, // cause more frequent trace file rotation ))) .unwrap(); - let (non_vote_sender, non_vote_receiver) = tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + .. + } = tracer.create_channels(false); let exit_for_dummy_thread = exit.clone(); let dummy_main_thread = thread::spawn(move || { @@ -142,7 +154,11 @@ fn bench_banking_tracer_background_thread_throughput(bencher: &mut Bencher) { let (tracer, tracer_thread) = BankingTracer::new(Some((&path, exit.clone(), 50 * 1024 * 1024))).unwrap(); - let (non_vote_sender, non_vote_receiver) = tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + .. 
+ } = tracer.create_channels(false); let dummy_main_thread = thread::spawn(move || { receiving_loop_with_minimized_sender_overhead::<_, TraceError, 0>( diff --git a/core/benches/forwarder.rs b/core/benches/forwarder.rs index 10a050f3d97d4b..80f401d9f5c834 100644 --- a/core/benches/forwarder.rs +++ b/core/benches/forwarder.rs @@ -3,15 +3,12 @@ extern crate test; use { itertools::Itertools, solana_client::connection_cache::ConnectionCache, - solana_core::{ - banking_stage::{ - forwarder::Forwarder, - leader_slot_metrics::LeaderSlotMetricsTracker, - unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, - unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, - BankingStageStats, - }, - tracer_packet_stats::TracerPacketStats, + solana_core::banking_stage::{ + forwarder::Forwarder, + leader_slot_metrics::LeaderSlotMetricsTracker, + unprocessed_packet_batches::{DeserializedPacket, UnprocessedPacketBatches}, + unprocessed_transaction_storage::{ThreadType, UnprocessedTransactionStorage}, + BankingStageStats, }, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -38,7 +35,6 @@ struct BenchSetup { unprocessed_packet_batches: UnprocessedTransactionStorage, tracker: LeaderSlotMetricsTracker, stats: BankingStageStats, - tracer_stats: TracerPacketStats, } fn setup(num_packets: usize, contentious_transaction: bool) -> BenchSetup { @@ -88,7 +84,6 @@ fn setup(num_packets: usize, contentious_transaction: bool) -> BenchSetup { transaction.message.account_keys[0] = solana_sdk::pubkey::Pubkey::new_unique(); } let mut packet = Packet::from_data(None, transaction).unwrap(); - packet.meta_mut().set_tracer(true); packet.meta_mut().set_from_staked_node(true); DeserializedPacket::new(packet).unwrap() }) @@ -118,7 +113,6 @@ fn setup(num_packets: usize, contentious_transaction: bool) -> BenchSetup { unprocessed_packet_batches, tracker: LeaderSlotMetricsTracker::new(0), stats: BankingStageStats::default(), - tracer_stats: TracerPacketStats::new(0), } } @@ -132,19 +126,12 @@ fn bench_forwarder_handle_forwading_contentious_transaction(bencher: &mut Benche mut unprocessed_packet_batches, mut tracker, stats, - mut tracer_stats, } = setup(num_packets, true); // hold packets so they can be reused for benching let hold = true; bencher.iter(|| { - forwarder.handle_forwarding( - &mut unprocessed_packet_batches, - hold, - &mut tracker, - &stats, - &mut tracer_stats, - ); + forwarder.handle_forwarding(&mut unprocessed_packet_batches, hold, &mut tracker, &stats); // reset packet.forwarded flag to reuse `unprocessed_packet_batches` if let UnprocessedTransactionStorage::LocalTransactionStorage(unprocessed_packets) = &mut unprocessed_packet_batches @@ -169,19 +156,12 @@ fn bench_forwarder_handle_forwading_parallel_transactions(bencher: &mut Bencher) mut unprocessed_packet_batches, mut tracker, stats, - mut tracer_stats, } = setup(num_packets, false); // hold packets so they can be reused for benching let hold = true; bencher.iter(|| { - forwarder.handle_forwarding( - &mut unprocessed_packet_batches, - hold, - &mut tracker, - &stats, - &mut tracer_stats, - ); + forwarder.handle_forwarding(&mut unprocessed_packet_batches, hold, &mut tracker, &stats); // reset packet.forwarded flag to reuse `unprocessed_packet_batches` if let UnprocessedTransactionStorage::LocalTransactionStorage(unprocessed_packets) = &mut unprocessed_packet_batches diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 3f11cc150574d3..e29710ef40b664 100644 --- 
a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -23,7 +23,6 @@ use { }, solana_sdk::{ hash::Hash, - packet::PacketFlags, signature::{Keypair, Signer}, system_transaction, }, @@ -58,7 +57,7 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) { info!("total packets: {}", total); bencher.iter(move || { - SigVerifyStage::discard_excess_packets(&mut batches, 10_000, |_| ()); + SigVerifyStage::discard_excess_packets(&mut batches, 10_000); let mut num_packets = 0; for batch in batches.iter_mut() { for p in batch.iter_mut() { @@ -105,7 +104,7 @@ fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) { } } bencher.iter(move || { - SigVerifyStage::discard_excess_packets(&mut batches, 10_000, |_| ()); + SigVerifyStage::discard_excess_packets(&mut batches, 10_000); let mut num_packets = 0; for batch in batches.iter_mut() { for packet in batch.iter_mut() { @@ -171,30 +170,25 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { ); let mut sent_len = 0; - for mut batch in batches.into_iter() { + for batch in batches.into_iter() { sent_len += batch.len(); - batch - .iter_mut() - .for_each(|packet| packet.meta_mut().flags |= PacketFlags::TRACER_PACKET); packet_s.send(batch).unwrap(); } let mut received = 0; - let mut total_tracer_packets_received_in_sigverify_stage = 0; - trace!("sent: {}", sent_len); + let expected = if use_same_tx { 1 } else { sent_len }; + trace!("sent: {}, expected: {}", sent_len, expected); loop { - if let Ok(message) = verified_r.recv_timeout(Duration::from_millis(10)) { - let (verifieds, tracer_packet_stats) = (&message.0, message.1.as_ref().unwrap()); + if let Ok(verifieds) = verified_r.recv_timeout(Duration::from_millis(10)) { received += verifieds.iter().map(|batch| batch.len()).sum::(); - total_tracer_packets_received_in_sigverify_stage += - tracer_packet_stats.total_tracer_packets_received_in_sigverify_stage; - test::black_box(message); - if total_tracer_packets_received_in_sigverify_stage >= sent_len { + test::black_box(verifieds); + if received >= expected { break; } } } trace!("received: {}", received); }); + // This will wait for all packets to make it through sigverify. 
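With the tracer-packet plumbing removed, the sigverify bench above terminates on packet counts alone: `expected` is 1 when every sent transaction is identical (dedup collapses them) and `sent_len` otherwise. A sketch of that drain pattern, with a stand-in payload type in place of the real verified-batch message:

use crossbeam_channel::{unbounded, Receiver};
use std::time::Duration;

// Keep polling until `expected` packets have arrived, as the bench loop does.
fn drain_until(verified_r: &Receiver<Vec<Vec<u8>>>, expected: usize) -> usize {
    let mut received = 0;
    while received < expected {
        if let Ok(verifieds) = verified_r.recv_timeout(Duration::from_millis(10)) {
            received += verifieds.iter().map(|batch| batch.len()).sum::<usize>();
        }
    }
    received
}

fn main() {
    let (s, r) = unbounded();
    s.send(vec![vec![1u8, 2, 3]]).unwrap();
    assert_eq!(drain_until(&r, 3), 3);
}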
stage.join().unwrap(); } diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs index 6e5113ded67336..82a4e5e94a3e76 100644 --- a/core/src/banking_simulation.rs +++ b/core/src/banking_simulation.rs @@ -3,8 +3,9 @@ use { crate::{ banking_stage::{BankingStage, LikeClusterInfo}, banking_trace::{ - BankingPacketBatch, BankingTracer, ChannelLabel, TimedTracedEvent, TracedEvent, - TracedSender, TracerThread, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, BASENAME, + BankingPacketBatch, BankingTracer, ChannelLabel, Channels, TimedTracedEvent, + TracedEvent, TracedSender, TracerThread, BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, + BASENAME, }, validator::BlockProductionMethod, }, @@ -488,8 +489,7 @@ impl SimulatorLoop { .bank_forks .read() .unwrap() - .working_bank_with_scheduler() - .clone_with_scheduler(); + .working_bank_with_scheduler(); self.poh_recorder .write() .unwrap() @@ -676,11 +676,7 @@ impl BankingSimulator { let parent_slot = self.parent_slot().unwrap(); let mut packet_batches_by_time = self.banking_trace_events.packet_batches_by_time; let freeze_time_by_slot = self.banking_trace_events.freeze_time_by_slot; - let bank = bank_forks - .read() - .unwrap() - .working_bank_with_scheduler() - .clone_with_scheduler(); + let bank = bank_forks.read().unwrap().working_bank_with_scheduler(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); assert_eq!(parent_slot, bank.slot()); @@ -758,9 +754,14 @@ impl BankingSimulator { BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT, ); - let (non_vote_sender, non_vote_receiver) = retracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = retracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = retracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = retracer.create_channels(false); let connection_cache = Arc::new(ConnectionCache::new("connection_cache_sim")); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); @@ -827,8 +828,7 @@ impl BankingSimulator { let timed_batches_to_send = packet_batches_by_time.split_off(&base_event_time); let batch_and_tx_counts = timed_batches_to_send .values() - .map(|(_label, batches_with_stats)| { - let batches = &batches_with_stats.0; + .map(|(_label, batches)| { ( batches.len(), batches.iter().map(|batch| batch.len()).sum::(), diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 49ccdb6ae15eff..4dfc6bc8e5c733 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -25,7 +25,6 @@ use { }, }, banking_trace::BankingPacketReceiver, - tracer_packet_stats::TracerPacketStats, validator::BlockProductionMethod, }, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, @@ -52,6 +51,7 @@ use { time::{Duration, Instant}, }, transaction_scheduler::{ + prio_graph_scheduler::PrioGraphSchedulerConfig, receive_and_buffer::SanitizedTransactionReceiveAndBuffer, transaction_state_container::TransactionStateContainer, }, @@ -319,8 +319,6 @@ pub enum ForwardOption { #[derive(Debug, Default)] pub struct FilterForwardingResults { pub(crate) total_forwardable_packets: usize, - pub(crate) total_tracer_packets_in_buffer: usize, - pub(crate) total_forwardable_tracer_packets: usize, pub(crate) total_dropped_packets: usize, pub(crate) total_packet_conversion_us: u64, pub(crate) total_filter_packets_us: u64, @@ -621,7 +619,11 @@ impl BankingStage { bank_forks.clone(), forwarder.is_some(), 
); - let scheduler = PrioGraphScheduler::new(work_senders, finished_work_receiver); + let scheduler = PrioGraphScheduler::new( + work_senders, + finished_work_receiver, + PrioGraphSchedulerConfig::default(), + ); let scheduler_controller = SchedulerController::new( decision_maker.clone(), receive_and_buffer, @@ -686,7 +688,6 @@ impl BankingStage { unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, - tracer_packet_stats: &mut TracerPacketStats, ) { if unprocessed_transaction_storage.should_not_process() { return; @@ -722,7 +723,6 @@ impl BankingStage { false, slot_metrics_tracker, banking_stage_stats, - tracer_packet_stats, )); slot_metrics_tracker.increment_forward_us(forward_us); // Take metrics action after forwarding packets to include forwarded @@ -735,7 +735,6 @@ impl BankingStage { true, slot_metrics_tracker, banking_stage_stats, - tracer_packet_stats, )); slot_metrics_tracker.increment_forward_and_hold_us(forward_and_hold_us); // Take metrics action after forwarding packets @@ -754,7 +753,6 @@ impl BankingStage { mut unprocessed_transaction_storage: UnprocessedTransactionStorage, ) { let mut banking_stage_stats = BankingStageStats::new(id); - let mut tracer_packet_stats = TracerPacketStats::new(id); let mut slot_metrics_tracker = LeaderSlotMetricsTracker::new(id); let mut last_metrics_update = Instant::now(); @@ -770,19 +768,15 @@ impl BankingStage { &mut unprocessed_transaction_storage, &banking_stage_stats, &mut slot_metrics_tracker, - &mut tracer_packet_stats, )); slot_metrics_tracker .increment_process_buffered_packets_us(process_buffered_packets_us); last_metrics_update = Instant::now(); } - tracer_packet_stats.report(1000); - match packet_receiver.receive_and_buffer_packets( &mut unprocessed_transaction_storage, &mut banking_stage_stats, - &mut tracer_packet_stats, &mut slot_metrics_tracker, ) { Ok(()) | Err(RecvTimeoutError::Timeout) => (), @@ -813,7 +807,7 @@ impl BankingStage { mod tests { use { super::*, - crate::banking_trace::{BankingPacketBatch, BankingTracer}, + crate::banking_trace::{BankingPacketBatch, BankingTracer, Channels}, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, solana_entry::entry::{self, Entry, EntrySlice}, @@ -874,10 +868,14 @@ mod tests { let genesis_config = create_genesis_config(2).genesis_config; let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -926,10 +924,14 @@ mod tests { let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - 
banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -1004,10 +1006,14 @@ mod tests { let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -1073,7 +1079,7 @@ mod tests { .collect(); let packet_batches = convert_from_old_verified(packet_batches); non_vote_sender // no_ver, anf, tx - .send(BankingPacketBatch::new((packet_batches, None))) + .send(BankingPacketBatch::new(packet_batches)) .unwrap(); drop(non_vote_sender); @@ -1138,7 +1144,14 @@ mod tests { .. } = create_slow_genesis_config(2); let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); // Process a batch that includes a transaction that receives two lamports. 
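As the hunks above show, `create_channels` now hands back all three banking channel pairs in one `Channels` struct, so call sites destructure what they need and discard the rest with `..`. A self-contained sketch of the shape these hunks imply, using unbounded crossbeam channels as stand-ins for the real banking_trace sender/receiver types:

use crossbeam_channel::{unbounded, Receiver, Sender};

struct Channels<T> {
    non_vote_sender: Sender<T>,
    non_vote_receiver: Receiver<T>,
    tpu_vote_sender: Sender<T>,
    tpu_vote_receiver: Receiver<T>,
    gossip_vote_sender: Sender<T>,
    gossip_vote_receiver: Receiver<T>,
}

fn create_channels<T>() -> Channels<T> {
    let (non_vote_sender, non_vote_receiver) = unbounded();
    let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
    let (gossip_vote_sender, gossip_vote_receiver) = unbounded();
    Channels {
        non_vote_sender,
        non_vote_receiver,
        tpu_vote_sender,
        tpu_vote_receiver,
        gossip_vote_sender,
        gossip_vote_receiver,
    }
}

fn main() {
    // A bench that only exercises the non-vote path ignores the vote pairs.
    let Channels {
        non_vote_sender,
        non_vote_receiver,
        ..
    } = create_channels::<u64>();
    non_vote_sender.send(42).unwrap();
    assert_eq!(non_vote_receiver.recv().unwrap(), 42);
}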
let alice = Keypair::new(); @@ -1152,7 +1165,7 @@ mod tests { .collect(); let packet_batches = convert_from_old_verified(packet_batches); non_vote_sender - .send(BankingPacketBatch::new((packet_batches, None))) + .send(BankingPacketBatch::new(packet_batches)) .unwrap(); // Process a second batch that uses the same from account, so conflicts with above TX @@ -1165,12 +1178,9 @@ mod tests { .collect(); let packet_batches = convert_from_old_verified(packet_batches); non_vote_sender - .send(BankingPacketBatch::new((packet_batches, None))) + .send(BankingPacketBatch::new(packet_batches)) .unwrap(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let (replay_vote_sender, _replay_vote_receiver) = unbounded(); @@ -1361,10 +1371,14 @@ mod tests { let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let start_hash = bank.last_blockhash(); let banking_tracer = BankingTracer::new_disabled(); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Arc::new( @@ -1472,7 +1486,7 @@ mod tests { Builder::new() .spawn(move || { sender - .send(BankingPacketBatch::new((packet_batches, None))) + .send(BankingPacketBatch::new(packet_batches)) .unwrap() }) .unwrap() diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 815c51b9b6b253..cdb2ad2ea2ceed 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -357,76 +357,79 @@ impl ConsumeWorkerMetrics { ) { self.error_metrics .total - .fetch_add(*total, Ordering::Relaxed); + .fetch_add(total.0, Ordering::Relaxed); self.error_metrics .account_in_use - .fetch_add(*account_in_use, Ordering::Relaxed); + .fetch_add(account_in_use.0, Ordering::Relaxed); self.error_metrics .too_many_account_locks - .fetch_add(*too_many_account_locks, Ordering::Relaxed); + .fetch_add(too_many_account_locks.0, Ordering::Relaxed); self.error_metrics .account_loaded_twice - .fetch_add(*account_loaded_twice, Ordering::Relaxed); + .fetch_add(account_loaded_twice.0, Ordering::Relaxed); self.error_metrics .account_not_found - .fetch_add(*account_not_found, Ordering::Relaxed); + .fetch_add(account_not_found.0, Ordering::Relaxed); self.error_metrics .blockhash_not_found - .fetch_add(*blockhash_not_found, Ordering::Relaxed); + .fetch_add(blockhash_not_found.0, Ordering::Relaxed); self.error_metrics .blockhash_too_old - .fetch_add(*blockhash_too_old, Ordering::Relaxed); + .fetch_add(blockhash_too_old.0, Ordering::Relaxed); self.error_metrics .call_chain_too_deep - .fetch_add(*call_chain_too_deep, Ordering::Relaxed); + .fetch_add(call_chain_too_deep.0, Ordering::Relaxed); self.error_metrics .already_processed - .fetch_add(*already_processed, Ordering::Relaxed); + .fetch_add(already_processed.0, Ordering::Relaxed); self.error_metrics .instruction_error - .fetch_add(*instruction_error, Ordering::Relaxed); + 
.fetch_add(instruction_error.0, Ordering::Relaxed); self.error_metrics .insufficient_funds - .fetch_add(*insufficient_funds, Ordering::Relaxed); + .fetch_add(insufficient_funds.0, Ordering::Relaxed); self.error_metrics .invalid_account_for_fee - .fetch_add(*invalid_account_for_fee, Ordering::Relaxed); + .fetch_add(invalid_account_for_fee.0, Ordering::Relaxed); self.error_metrics .invalid_account_index - .fetch_add(*invalid_account_index, Ordering::Relaxed); + .fetch_add(invalid_account_index.0, Ordering::Relaxed); self.error_metrics .invalid_program_for_execution - .fetch_add(*invalid_program_for_execution, Ordering::Relaxed); + .fetch_add(invalid_program_for_execution.0, Ordering::Relaxed); self.error_metrics .invalid_compute_budget - .fetch_add(*invalid_compute_budget, Ordering::Relaxed); + .fetch_add(invalid_compute_budget.0, Ordering::Relaxed); self.error_metrics .not_allowed_during_cluster_maintenance - .fetch_add(*not_allowed_during_cluster_maintenance, Ordering::Relaxed); + .fetch_add(not_allowed_during_cluster_maintenance.0, Ordering::Relaxed); self.error_metrics .invalid_writable_account - .fetch_add(*invalid_writable_account, Ordering::Relaxed); + .fetch_add(invalid_writable_account.0, Ordering::Relaxed); self.error_metrics .invalid_rent_paying_account - .fetch_add(*invalid_rent_paying_account, Ordering::Relaxed); + .fetch_add(invalid_rent_paying_account.0, Ordering::Relaxed); self.error_metrics .would_exceed_max_block_cost_limit - .fetch_add(*would_exceed_max_block_cost_limit, Ordering::Relaxed); + .fetch_add(would_exceed_max_block_cost_limit.0, Ordering::Relaxed); self.error_metrics .would_exceed_max_account_cost_limit - .fetch_add(*would_exceed_max_account_cost_limit, Ordering::Relaxed); + .fetch_add(would_exceed_max_account_cost_limit.0, Ordering::Relaxed); self.error_metrics .would_exceed_max_vote_cost_limit - .fetch_add(*would_exceed_max_vote_cost_limit, Ordering::Relaxed); + .fetch_add(would_exceed_max_vote_cost_limit.0, Ordering::Relaxed); self.error_metrics .would_exceed_account_data_block_limit - .fetch_add(*would_exceed_account_data_block_limit, Ordering::Relaxed); + .fetch_add(would_exceed_account_data_block_limit.0, Ordering::Relaxed); self.error_metrics .max_loaded_accounts_data_size_exceeded - .fetch_add(*max_loaded_accounts_data_size_exceeded, Ordering::Relaxed); + .fetch_add(max_loaded_accounts_data_size_exceeded.0, Ordering::Relaxed); self.error_metrics .program_execution_temporarily_restricted - .fetch_add(*program_execution_temporarily_restricted, Ordering::Relaxed); + .fetch_add( + program_execution_temporarily_restricted.0, + Ordering::Relaxed, + ); } } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 2c8d46d46d7cdb..7f8d7f1d4baee9 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -12,7 +12,6 @@ use { BankingStageStats, }, itertools::Itertools, - solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, solana_feature_set as feature_set, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, @@ -39,7 +38,6 @@ use { transaction_processing_result::TransactionProcessingResultExtensions, transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig}, }, - solana_svm_transaction::svm_message::SVMMessage, solana_timings::ExecuteTimings, std::{ num::Saturating, @@ -753,16 +751,17 @@ impl Consumer { pub fn check_fee_payer_unlocked( bank: &Bank, - message: &impl SVMMessage, + 
transaction: &impl TransactionWithMeta, error_counters: &mut TransactionErrorMetrics, ) -> Result<(), TransactionError> { - let fee_payer = message.fee_payer(); - let fee_budget_limits = FeeBudgetLimits::from(process_compute_budget_instructions( - message.program_instructions_iter(), - &bank.feature_set, - )?); + let fee_payer = transaction.fee_payer(); + let fee_budget_limits = FeeBudgetLimits::from( + transaction + .compute_budget_instruction_details() + .sanitize_and_convert_to_compute_budget_limits(&bank.feature_set)?, + ); let fee = solana_fee::calculate_fee( - message, + transaction, bank.get_lamports_per_signature() == 0, bank.fee_structure().lamports_per_signature, fee_budget_limits.prioritization_fee, diff --git a/core/src/banking_stage/forwarder.rs b/core/src/banking_stage/forwarder.rs index de4a5d913b6f25..0a3cfd35b2a9e5 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -10,7 +10,6 @@ use { immutable_deserialized_packet::ImmutableDeserializedPacket, LikeClusterInfo, }, next_leader::{next_leader, next_leader_tpu_vote}, - tracer_packet_stats::TracerPacketStats, }, solana_client::connection_cache::ConnectionCache, solana_connection_cache::client_connection::ClientConnection as TpuConnection, @@ -96,7 +95,6 @@ impl Forwarder { hold: bool, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, banking_stage_stats: &BankingStageStats, - tracer_packet_stats: &mut TracerPacketStats, ) { let forward_option = unprocessed_transaction_storage.forward_option(); @@ -139,19 +137,13 @@ impl Forwarder { slot_metrics_tracker.increment_forwardable_batches_count(1); let batched_forwardable_packets_count = forward_batch.len(); - let (_forward_result, successful_forwarded_packets_count, leader_pubkey) = self + let (_forward_result, successful_forwarded_packets_count, _leader_pubkey) = self .forward_buffered_packets( &forward_option, forward_batch.get_forwardable_packets(), banking_stage_stats, ); - if let Some(leader_pubkey) = leader_pubkey { - tracer_packet_stats.increment_total_forwardable_tracer_packets( - filter_forwarding_result.total_forwardable_tracer_packets, - leader_pubkey, - ); - } let failed_forwarded_packets_count = batched_forwardable_packets_count .saturating_sub(successful_forwarded_packets_count); @@ -174,9 +166,6 @@ impl Forwarder { slot_metrics_tracker.increment_cleared_from_buffer_after_forward_count( filter_forwarding_result.total_forwardable_packets as u64, ); - tracer_packet_stats.increment_total_cleared_from_buffer_after_forward( - filter_forwarding_result.total_tracer_packets_in_buffer, - ); unprocessed_transaction_storage.clear_forwarded_packets(); } } @@ -485,7 +474,6 @@ mod tests { true, &mut LeaderSlotMetricsTracker::new(0), &stats, - &mut TracerPacketStats::new(0), ); let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; @@ -584,7 +572,6 @@ mod tests { hold, &mut LeaderSlotMetricsTracker::new(0), &stats, - &mut TracerPacketStats::new(0), ); let recv_socket = &local_node.sockets.tpu_forwards_quic[0]; diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index ce232a370b2a5f..b721d4be3695b7 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -403,88 +403,88 @@ fn report_transaction_error_metrics(errors: &TransactionErrorMetrics, id: &str, "banking_stage-leader_slot_transaction_errors", "id" => id, ("slot", slot as i64, i64), - ("total", errors.total as i64, i64), - ("account_in_use", errors.account_in_use as i64, 
i64), + ("total", errors.total.0 as i64, i64), + ("account_in_use", errors.account_in_use.0 as i64, i64), ( "too_many_account_locks", - errors.too_many_account_locks as i64, + errors.too_many_account_locks.0 as i64, i64 ), ( "account_loaded_twice", - errors.account_loaded_twice as i64, + errors.account_loaded_twice.0 as i64, i64 ), - ("account_not_found", errors.account_not_found as i64, i64), - ("blockhash_not_found", errors.blockhash_not_found as i64, i64), - ("blockhash_too_old", errors.blockhash_too_old as i64, i64), - ("call_chain_too_deep", errors.call_chain_too_deep as i64, i64), - ("already_processed", errors.already_processed as i64, i64), - ("instruction_error", errors.instruction_error as i64, i64), - ("insufficient_funds", errors.insufficient_funds as i64, i64), + ("account_not_found", errors.account_not_found.0 as i64, i64), + ("blockhash_not_found", errors.blockhash_not_found.0 as i64, i64), + ("blockhash_too_old", errors.blockhash_too_old.0 as i64, i64), + ("call_chain_too_deep", errors.call_chain_too_deep.0 as i64, i64), + ("already_processed", errors.already_processed.0 as i64, i64), + ("instruction_error", errors.instruction_error.0 as i64, i64), + ("insufficient_funds", errors.insufficient_funds.0 as i64, i64), ( "invalid_account_for_fee", - errors.invalid_account_for_fee as i64, + errors.invalid_account_for_fee.0 as i64, i64 ), ( "invalid_account_index", - errors.invalid_account_index as i64, + errors.invalid_account_index.0 as i64, i64 ), ( "invalid_program_for_execution", - errors.invalid_program_for_execution as i64, + errors.invalid_program_for_execution.0 as i64, i64 ), ( "invalid_compute_budget", - errors.invalid_compute_budget as i64, + errors.invalid_compute_budget.0 as i64, i64 ), ( "not_allowed_during_cluster_maintenance", - errors.not_allowed_during_cluster_maintenance as i64, + errors.not_allowed_during_cluster_maintenance.0 as i64, i64 ), ( "invalid_writable_account", - errors.invalid_writable_account as i64, + errors.invalid_writable_account.0 as i64, i64 ), ( "invalid_rent_paying_account", - errors.invalid_rent_paying_account as i64, + errors.invalid_rent_paying_account.0 as i64, i64 ), ( "would_exceed_max_block_cost_limit", - errors.would_exceed_max_block_cost_limit as i64, + errors.would_exceed_max_block_cost_limit.0 as i64, i64 ), ( "would_exceed_max_account_cost_limit", - errors.would_exceed_max_account_cost_limit as i64, + errors.would_exceed_max_account_cost_limit.0 as i64, i64 ), ( "would_exceed_max_vote_cost_limit", - errors.would_exceed_max_vote_cost_limit as i64, + errors.would_exceed_max_vote_cost_limit.0 as i64, i64 ), ( "would_exceed_account_data_block_limit", - errors.would_exceed_account_data_block_limit as i64, + errors.would_exceed_account_data_block_limit.0 as i64, i64 ), ( "max_loaded_accounts_data_size_exceeded", - errors.max_loaded_accounts_data_size_exceeded as i64, + errors.max_loaded_accounts_data_size_exceeded.0 as i64, i64 ), ( "program_execution_temporarily_restricted", - errors.program_execution_temporarily_restricted as i64, + errors.program_execution_temporarily_restricted.0 as i64, i64 ), ); @@ -745,14 +745,14 @@ impl LeaderSlotMetricsTracker { leader_slot_metrics .packet_count_metrics .account_lock_throttled_transactions_count, - error_counters.account_in_use as u64 + error_counters.account_in_use.0 as u64 ); saturating_add_assign!( leader_slot_metrics .packet_count_metrics .account_locks_limit_throttled_transactions_count, - error_counters.too_many_account_locks as u64 + error_counters.too_many_account_locks.0 as u64 ); 
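The `.0` reads above reflect the error counters changing from bare integers to wrapper values; the `std::num::Saturating` import visible in the consumer.rs hunk suggests saturating counters, which clamp at the type's maximum instead of wrapping on overflow. A minimal sketch of that behavior:

use std::num::Saturating;

fn main() {
    let mut account_in_use = Saturating(0usize);
    account_in_use += Saturating(1);
    // Reporting reads the primitive back out through `.0`, as the metrics
    // sites above now do.
    assert_eq!(account_in_use.0, 1);
    // Saturation instead of wrap-around on overflow:
    let mut near_max = Saturating(usize::MAX - 1);
    near_max += Saturating(5);
    assert_eq!(near_max.0, usize::MAX);
}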
saturating_add_assign!( @@ -762,13 +762,10 @@ impl LeaderSlotMetricsTracker { *cost_model_throttled_transactions_count ); - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_packets_timings - .cost_model_us, - *cost_model_us - ); + leader_slot_metrics + .timing_metrics + .process_packets_timings + .cost_model_us += cost_model_us; leader_slot_metrics .packet_count_metrics @@ -995,98 +992,74 @@ impl LeaderSlotMetricsTracker { // Processing buffer timing metrics pub(crate) fn increment_make_decision_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .make_decision_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_buffered_packets_timings + .make_decision_us += us; } } pub(crate) fn increment_consume_buffered_packets_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .consume_buffered_packets_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_buffered_packets_timings + .consume_buffered_packets_us += us; } } pub(crate) fn increment_forward_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .forward_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_buffered_packets_timings + .forward_us += us; } } pub(crate) fn increment_forward_and_hold_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_buffered_packets_timings - .forward_and_hold_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_buffered_packets_timings + .forward_and_hold_us += us; } } pub(crate) fn increment_process_packets_transactions_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .consume_buffered_packets_timings - .process_packets_transactions_us, - us - ); + leader_slot_metrics + .timing_metrics + .consume_buffered_packets_timings + .process_packets_transactions_us += us } } // Processing packets timing metrics pub(crate) fn increment_transactions_from_packets_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_packets_timings - .transactions_from_packets_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_packets_timings + .transactions_from_packets_us += us; } } pub(crate) fn increment_process_transactions_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_packets_timings - .process_transactions_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_packets_timings + .process_transactions_us += us; } } pub(crate) fn increment_filter_retryable_packets_us(&mut self, us: u64) { if let Some(leader_slot_metrics) = &mut self.leader_slot_metrics { - saturating_add_assign!( - leader_slot_metrics - .timing_metrics - .process_packets_timings - .filter_retryable_packets_us, - us - ); + leader_slot_metrics + .timing_metrics + .process_packets_timings + 
.filter_retryable_packets_us += us; } } diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 31b3dc0a24e7ca..5a3d93a0f65b09 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -1,8 +1,6 @@ use { - solana_poh::poh_recorder::RecordTransactionsTimings, - solana_sdk::{clock::Slot, saturating_add_assign}, - solana_timings::ExecuteTimings, - std::time::Instant, + solana_poh::poh_recorder::RecordTransactionsTimings, solana_sdk::clock::Slot, + solana_timings::ExecuteTimings, std::time::Instant, }; #[derive(Default, Debug)] @@ -19,12 +17,12 @@ pub struct LeaderExecuteAndCommitTimings { impl LeaderExecuteAndCommitTimings { pub fn accumulate(&mut self, other: &LeaderExecuteAndCommitTimings) { - saturating_add_assign!(self.collect_balances_us, other.collect_balances_us); - saturating_add_assign!(self.load_execute_us, other.load_execute_us); - saturating_add_assign!(self.freeze_lock_us, other.freeze_lock_us); - saturating_add_assign!(self.record_us, other.record_us); - saturating_add_assign!(self.commit_us, other.commit_us); - saturating_add_assign!(self.find_and_send_votes_us, other.find_and_send_votes_us); + self.collect_balances_us += other.collect_balances_us; + self.load_execute_us += other.load_execute_us; + self.freeze_lock_us += other.freeze_lock_us; + self.record_us += other.record_us; + self.commit_us += other.commit_us; + self.find_and_send_votes_us += other.find_and_send_votes_us; self.record_transactions_timings .accumulate(&other.record_transactions_timings); self.execute_timings.accumulate(&other.execute_timings); diff --git a/core/src/banking_stage/packet_deserializer.rs b/core/src/banking_stage/packet_deserializer.rs index 78fab3718252f4..33f41cf377cc48 100644 --- a/core/src/banking_stage/packet_deserializer.rs +++ b/core/src/banking_stage/packet_deserializer.rs @@ -5,10 +5,7 @@ use { immutable_deserialized_packet::{DeserializedPacketError, ImmutableDeserializedPacket}, packet_filter::PacketFilterFailure, }, - crate::{ - banking_trace::{BankingPacketBatch, BankingPacketReceiver}, - sigverify::SigverifyTracerPacketStats, - }, + crate::banking_trace::{BankingPacketBatch, BankingPacketReceiver}, crossbeam_channel::RecvTimeoutError, solana_perf::packet::PacketBatch, solana_sdk::saturating_add_assign, @@ -19,8 +16,6 @@ use { pub struct ReceivePacketResults { /// Deserialized packets from all received packet batches pub deserialized_packets: Vec, - /// Aggregate tracer stats for all received packet batches - pub new_tracer_stats_option: Option, /// Counts of packets received and errors recorded during deserialization /// and filtering pub packet_stats: PacketReceiverStats, @@ -112,10 +107,9 @@ impl PacketDeserializer { ) -> ReceivePacketResults { let mut packet_stats = PacketReceiverStats::default(); let mut deserialized_packets = Vec::with_capacity(packet_count); - let mut aggregated_tracer_packet_stats_option = None::; for banking_batch in banking_batches { - for packet_batch in &banking_batch.0 { + for packet_batch in banking_batch.iter() { let packet_indexes = Self::generate_packet_indexes(packet_batch); saturating_add_assign!( @@ -134,23 +128,10 @@ impl PacketDeserializer { &packet_filter, )); } - - if let Some(tracer_packet_stats) = &banking_batch.1 { - if let Some(aggregated_tracer_packet_stats) = - &mut aggregated_tracer_packet_stats_option - { - aggregated_tracer_packet_stats.aggregate(tracer_packet_stats); - } else { - // 
BankingPacketBatch is owned by Arc; so we have to clone its internal field - // (SigverifyTracerPacketStats). - aggregated_tracer_packet_stats_option = Some(tracer_packet_stats.clone()); - } - } } ReceivePacketResults { deserialized_packets, - new_tracer_stats_option: aggregated_tracer_packet_stats_option, packet_stats, } } @@ -163,22 +144,20 @@ impl PacketDeserializer { ) -> Result<(usize, Vec), RecvTimeoutError> { let start = Instant::now(); - let message = self.packet_batch_receiver.recv_timeout(recv_timeout)?; - let packet_batches = &message.0; + let packet_batches = self.packet_batch_receiver.recv_timeout(recv_timeout)?; let mut num_packets_received = packet_batches .iter() .map(|batch| batch.len()) .sum::(); - let mut messages = vec![message]; + let mut messages = vec![packet_batches]; - while let Ok(message) = self.packet_batch_receiver.try_recv() { - let packet_batches = &message.0; + while let Ok(packet_batches) = self.packet_batch_receiver.try_recv() { trace!("got more packet batches in packet deserializer"); num_packets_received += packet_batches .iter() .map(|batch| batch.len()) .sum::(); - messages.push(message); + messages.push(packet_batches); if start.elapsed() >= recv_timeout || num_packets_received >= packet_count_upperbound { break; @@ -240,7 +219,6 @@ mod tests { fn test_deserialize_and_collect_packets_empty() { let results = PacketDeserializer::deserialize_and_collect_packets(0, &[], Ok); assert_eq!(results.deserialized_packets.len(), 0); - assert!(results.new_tracer_stats_option.is_none()); assert_eq!(results.packet_stats.passed_sigverify_count, 0); assert_eq!(results.packet_stats.failed_sigverify_count, 0); } @@ -254,11 +232,10 @@ mod tests { let packet_count: usize = packet_batches.iter().map(|x| x.len()).sum(); let results = PacketDeserializer::deserialize_and_collect_packets( packet_count, - &[BankingPacketBatch::new((packet_batches, None))], + &[BankingPacketBatch::new(packet_batches)], Ok, ); assert_eq!(results.deserialized_packets.len(), 2); - assert!(results.new_tracer_stats_option.is_none()); assert_eq!(results.packet_stats.passed_sigverify_count, 2); assert_eq!(results.packet_stats.failed_sigverify_count, 0); } @@ -273,11 +250,10 @@ mod tests { let packet_count: usize = packet_batches.iter().map(|x| x.len()).sum(); let results = PacketDeserializer::deserialize_and_collect_packets( packet_count, - &[BankingPacketBatch::new((packet_batches, None))], + &[BankingPacketBatch::new(packet_batches)], Ok, ); assert_eq!(results.deserialized_packets.len(), 1); - assert!(results.new_tracer_stats_option.is_none()); assert_eq!(results.packet_stats.passed_sigverify_count, 1); assert_eq!(results.packet_stats.failed_sigverify_count, 1); } diff --git a/core/src/banking_stage/packet_receiver.rs b/core/src/banking_stage/packet_receiver.rs index 6b77d103c69670..e95b20c3df4f1e 100644 --- a/core/src/banking_stage/packet_receiver.rs +++ b/core/src/banking_stage/packet_receiver.rs @@ -6,7 +6,7 @@ use { unprocessed_transaction_storage::UnprocessedTransactionStorage, BankingStageStats, }, - crate::{banking_trace::BankingPacketReceiver, tracer_packet_stats::TracerPacketStats}, + crate::banking_trace::BankingPacketReceiver, crossbeam_channel::RecvTimeoutError, solana_measure::{measure::Measure, measure_us}, solana_sdk::{saturating_add_assign, timing::timestamp}, @@ -31,7 +31,6 @@ impl PacketReceiver { &mut self, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &mut BankingStageStats, - tracer_packet_stats: &mut TracerPacketStats, 
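The PacketDeserializer hunk above keeps its receive-then-drain shape: block for the first message, then opportunistically pull more until the timeout elapses or a packet-count upper bound is reached. A sketch of that pattern; `Msg` stands in for BankingPacketBatch, and only its length matters here:

use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::{Duration, Instant};

type Msg = Vec<Vec<u8>>;

fn receive_until(
    rx: &Receiver<Msg>,
    recv_timeout: Duration,
    packet_count_upperbound: usize,
) -> Result<(usize, Vec<Msg>), RecvTimeoutError> {
    let start = Instant::now();
    // Block for the first message (or time out)...
    let first = rx.recv_timeout(recv_timeout)?;
    let mut num_packets_received: usize = first.iter().map(|batch| batch.len()).sum();
    let mut messages = vec![first];
    // ...then drain whatever is already queued, bounded by time and count.
    while let Ok(more) = rx.try_recv() {
        num_packets_received += more.iter().map(|batch| batch.len()).sum::<usize>();
        messages.push(more);
        if start.elapsed() >= recv_timeout || num_packets_received >= packet_count_upperbound {
            break;
        }
    }
    Ok((num_packets_received, messages))
}

fn main() {
    let (s, r) = unbounded();
    s.send(vec![vec![0u8; 4]]).unwrap();
    s.send(vec![vec![0u8; 2]]).unwrap();
    let (num, msgs) = receive_until(&r, Duration::from_millis(10), 100).unwrap();
    assert_eq!((num, msgs.len()), (6, 2));
}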
slot_metrics_tracker: &mut LeaderSlotMetricsTracker, ) -> Result<(), RecvTimeoutError> { let (result, recv_time_us) = measure_us!({ @@ -53,7 +52,6 @@ impl PacketReceiver { receive_packet_results, unprocessed_transaction_storage, banking_stage_stats, - tracer_packet_stats, slot_metrics_tracker, ); recv_and_buffer_measure.stop(); @@ -93,21 +91,16 @@ impl PacketReceiver { &self, ReceivePacketResults { deserialized_packets, - new_tracer_stats_option, packet_stats, }: ReceivePacketResults, unprocessed_transaction_storage: &mut UnprocessedTransactionStorage, banking_stage_stats: &mut BankingStageStats, - tracer_packet_stats: &mut TracerPacketStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, ) { let packet_count = deserialized_packets.len(); debug!("@{:?} txs: {} id: {}", timestamp(), packet_count, self.id); slot_metrics_tracker.increment_received_packet_counts(packet_stats); - if let Some(new_sigverify_stats) = &new_tracer_stats_option { - tracer_packet_stats.aggregate_sigverify_tracer_packet_stats(new_sigverify_stats); - } let mut dropped_packets_count = 0; let mut newly_buffered_packets_count = 0; @@ -120,7 +113,6 @@ impl PacketReceiver { &mut newly_buffered_forwarded_packets_count, banking_stage_stats, slot_metrics_tracker, - tracer_packet_stats, ); banking_stage_stats @@ -145,7 +137,6 @@ impl PacketReceiver { newly_buffered_forwarded_packets_count: &mut usize, banking_stage_stats: &mut BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, - tracer_packet_stats: &mut TracerPacketStats, ) { if !deserialized_packets.is_empty() { let _ = banking_stage_stats @@ -168,9 +159,6 @@ impl PacketReceiver { *dropped_packets_count, insert_packet_batches_summary.total_dropped_packets() ); - tracer_packet_stats.increment_total_exceeded_banking_stage_buffer( - insert_packet_batches_summary.dropped_tracer_packets(), - ); } } } diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index 950f506fd51af4..8edebc1f80c200 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -41,19 +41,38 @@ type SchedulerPrioGraph = PrioGraph< fn(&TransactionPriorityId, &GraphNode) -> TransactionPriorityId, >; +pub(crate) struct PrioGraphSchedulerConfig { + pub max_scheduled_cus: u64, + pub max_transactions_per_scheduling_pass: usize, + pub look_ahead_window_size: usize, + pub target_transactions_per_batch: usize, +} + +impl Default for PrioGraphSchedulerConfig { + fn default() -> Self { + Self { + max_scheduled_cus: MAX_BLOCK_UNITS, + max_transactions_per_scheduling_pass: 100_000, + look_ahead_window_size: 2048, + target_transactions_per_batch: TARGET_NUM_TRANSACTIONS_PER_BATCH, + } + } +} + pub(crate) struct PrioGraphScheduler { in_flight_tracker: InFlightTracker, account_locks: ThreadAwareAccountLocks, consume_work_senders: Vec>>, finished_consume_work_receiver: Receiver>, - look_ahead_window_size: usize, prio_graph: SchedulerPrioGraph, + config: PrioGraphSchedulerConfig, } impl PrioGraphScheduler { pub(crate) fn new( consume_work_senders: Vec>>, finished_consume_work_receiver: Receiver>, + config: PrioGraphSchedulerConfig, ) -> Self { let num_threads = consume_work_senders.len(); Self { @@ -61,8 +80,8 @@ impl PrioGraphScheduler { account_locks: ThreadAwareAccountLocks::new(num_threads), consume_work_senders, finished_consume_work_receiver, - look_ahead_window_size: 2048, prio_graph: 
PrioGraph::new(passthrough_priority), + config, } } @@ -89,7 +108,7 @@ impl PrioGraphScheduler { pre_lock_filter: impl Fn(&Tx) -> bool, ) -> Result { let num_threads = self.consume_work_senders.len(); - let max_cu_per_thread = MAX_BLOCK_UNITS / num_threads as u64; + let max_cu_per_thread = self.config.max_scheduled_cus / num_threads as u64; let mut schedulable_threads = ThreadSet::any(num_threads); for thread_id in 0..num_threads { @@ -106,7 +125,7 @@ impl PrioGraphScheduler { }); } - let mut batches = Batches::new(num_threads); + let mut batches = Batches::new(num_threads, self.config.target_transactions_per_batch); // Some transactions may be unschedulable due to multi-thread conflicts. // These transactions cannot be scheduled until some conflicting work is completed. // However, the scheduler should not allow other transactions that conflict with @@ -118,7 +137,7 @@ impl PrioGraphScheduler { let mut num_filtered_out: usize = 0; let mut total_filter_time_us: u64 = 0; - let mut window_budget = self.look_ahead_window_size; + let mut window_budget = self.config.look_ahead_window_size; let mut chunked_pops = |container: &mut S, prio_graph: &mut PrioGraph<_, _, _, _>, window_budget: &mut usize| { @@ -170,13 +189,13 @@ impl PrioGraphScheduler { // Check transactions against filter, remove from container if it fails. chunked_pops(container, &mut self.prio_graph, &mut window_budget); - let mut unblock_this_batch = - Vec::with_capacity(self.consume_work_senders.len() * TARGET_NUM_TRANSACTIONS_PER_BATCH); - const MAX_TRANSACTIONS_PER_SCHEDULING_PASS: usize = 100_000; + let mut unblock_this_batch = Vec::with_capacity( + self.consume_work_senders.len() * self.config.target_transactions_per_batch, + ); let mut num_scheduled: usize = 0; let mut num_sent: usize = 0; let mut num_unschedulable: usize = 0; - while num_scheduled < MAX_TRANSACTIONS_PER_SCHEDULING_PASS { + while num_scheduled < self.config.max_transactions_per_scheduling_pass { // If nothing is in the main-queue of the `PrioGraph` then there's nothing left to schedule. if self.prio_graph.is_empty() { break; @@ -229,7 +248,8 @@ impl PrioGraphScheduler { saturating_add_assign!(batches.total_cus[thread_id], cost); // If target batch size is reached, send only this batch. - if batches.ids[thread_id].len() >= TARGET_NUM_TRANSACTIONS_PER_BATCH { + if batches.ids[thread_id].len() >= self.config.target_transactions_per_batch + { saturating_add_assign!( num_sent, self.send_batch(&mut batches, thread_id)? 
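Gathering the scheduler knobs into `PrioGraphSchedulerConfig` gives the former magic numbers one home plus a `Default`. A self-contained sketch of the pattern; the two workspace constants (MAX_BLOCK_UNITS and TARGET_NUM_TRANSACTIONS_PER_BATCH) are replaced by stand-in values here. Production code passes `PrioGraphSchedulerConfig::default()`, while the priority-guard test below instead mutates `config.look_ahead_window_size` directly after construction:

struct PrioGraphSchedulerConfig {
    max_scheduled_cus: u64,
    max_transactions_per_scheduling_pass: usize,
    look_ahead_window_size: usize,
    target_transactions_per_batch: usize,
}

impl Default for PrioGraphSchedulerConfig {
    fn default() -> Self {
        Self {
            max_scheduled_cus: 48_000_000, // stand-in for MAX_BLOCK_UNITS
            max_transactions_per_scheduling_pass: 100_000,
            look_ahead_window_size: 2048,
            target_transactions_per_batch: 64, // stand-in for TARGET_NUM_TRANSACTIONS_PER_BATCH
        }
    }
}

fn main() {
    // Override a single knob with struct update syntax, keep the rest.
    let config = PrioGraphSchedulerConfig {
        look_ahead_window_size: 2,
        ..Default::default()
    };
    assert_eq!(config.look_ahead_window_size, 2);
    assert_eq!(config.max_transactions_per_scheduling_pass, 100_000);
}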
@@ -248,7 +268,7 @@ impl PrioGraphScheduler { } } - if num_scheduled >= MAX_TRANSACTIONS_PER_SCHEDULING_PASS { + if num_scheduled >= self.config.max_transactions_per_scheduling_pass { break; } } @@ -408,7 +428,8 @@ impl PrioGraphScheduler { return Ok(0); } - let (ids, transactions, max_ages, total_cus) = batches.take_batch(thread_index); + let (ids, transactions, max_ages, total_cus) = + batches.take_batch(thread_index, self.config.target_transactions_per_batch); let batch_id = self .in_flight_tracker @@ -498,14 +519,14 @@ struct Batches { } impl Batches { - fn new(num_threads: usize) -> Self { + fn new(num_threads: usize, target_num_transactions_per_batch: usize) -> Self { Self { - ids: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], + ids: vec![Vec::with_capacity(target_num_transactions_per_batch); num_threads], transactions: (0..num_threads) - .map(|_| Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH)) + .map(|_| Vec::with_capacity(target_num_transactions_per_batch)) .collect(), - max_ages: vec![Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH); num_threads], + max_ages: vec![Vec::with_capacity(target_num_transactions_per_batch); num_threads], total_cus: vec![0; num_threads], } } @@ -513,19 +534,20 @@ impl Batches { fn take_batch( &mut self, thread_id: ThreadId, + target_num_transactions_per_batch: usize, ) -> (Vec, Vec, Vec, u64) { ( core::mem::replace( &mut self.ids[thread_id], - Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH), + Vec::with_capacity(target_num_transactions_per_batch), ), core::mem::replace( &mut self.transactions[thread_id], - Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH), + Vec::with_capacity(target_num_transactions_per_batch), ), core::mem::replace( &mut self.max_ages[thread_id], - Vec::with_capacity(TARGET_NUM_TRANSACTIONS_PER_BATCH), + Vec::with_capacity(target_num_transactions_per_batch), ), core::mem::replace(&mut self.total_cus[thread_id], 0), ) @@ -605,7 +627,6 @@ mod tests { use { super::*, crate::banking_stage::{ - consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, immutable_deserialized_packet::ImmutableDeserializedPacket, transaction_scheduler::transaction_state_container::TransactionStateContainer, }, @@ -637,8 +658,11 @@ mod tests { let (consume_work_senders, consume_work_receivers) = (0..num_threads).map(|_| unbounded()).unzip(); let (finished_consume_work_sender, finished_consume_work_receiver) = unbounded(); - let scheduler = - PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver); + let scheduler = PrioGraphScheduler::new( + consume_work_senders, + finished_consume_work_receiver, + PrioGraphSchedulerConfig::default(), + ); ( scheduler, consume_work_receivers, @@ -821,7 +845,7 @@ mod tests { fn test_schedule_priority_guard() { let (mut scheduler, work_receivers, finished_work_sender) = create_test_frame(2); // intentionally shorten the look-ahead window to cause unschedulable conflicts - scheduler.look_ahead_window_size = 2; + scheduler.config.look_ahead_window_size = 2; let accounts = (0..8).map(|_| Keypair::new()).collect_vec(); let mut container = create_container([ diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index 14a175b2018260..0a7bcf34fc0a01 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -441,10 +441,12 @@ mod tests { packet_deserializer::PacketDeserializer, 
scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId}, tests::create_slow_genesis_config, - transaction_scheduler::receive_and_buffer::SanitizedTransactionReceiveAndBuffer, + transaction_scheduler::{ + prio_graph_scheduler::PrioGraphSchedulerConfig, + receive_and_buffer::SanitizedTransactionReceiveAndBuffer, + }, }, banking_trace::BankingPacketBatch, - sigverify::SigverifyTracerPacketStats, }, crossbeam_channel::{unbounded, Receiver, Sender}, itertools::Itertools, @@ -486,7 +488,7 @@ mod tests { _entry_receiver: Receiver, _record_receiver: Receiver, poh_recorder: Arc>, - banking_packet_sender: Sender, Option)>>, + banking_packet_sender: Sender>>, consume_work_receivers: Vec>>>, @@ -550,11 +552,16 @@ mod tests { false, ); + let scheduler = PrioGraphScheduler::new( + consume_work_senders, + finished_consume_work_receiver, + PrioGraphSchedulerConfig::default(), + ); let scheduler_controller = SchedulerController::new( decision_maker, receive_and_buffer, bank_forks, - PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), + scheduler, vec![], // no actual workers with metrics to report, this can be empty None, ); @@ -589,8 +596,7 @@ mod tests { } fn to_banking_packet_batch(txs: &[Transaction]) -> BankingPacketBatch { - let packet_batch = to_packet_batches(txs, NUM_PACKETS); - Arc::new((packet_batch, None)) + BankingPacketBatch::new(to_packet_batches(txs, NUM_PACKETS)) } // Helper function to let test receive and then schedule packets. diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index 3c4e0f66664dd2..493025e9cd635e 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -57,7 +57,6 @@ impl Ord for DeserializedPacket { #[derive(Debug)] pub struct PacketBatchInsertionMetrics { pub(crate) num_dropped_packets: usize, - pub(crate) num_dropped_tracer_packets: usize, } /// Currently each banking_stage thread has a `UnprocessedPacketBatches` buffer to store @@ -103,23 +102,13 @@ impl UnprocessedPacketBatches { deserialized_packets: impl Iterator, ) -> PacketBatchInsertionMetrics { let mut num_dropped_packets = 0; - let mut num_dropped_tracer_packets = 0; for deserialized_packet in deserialized_packets { - if let Some(dropped_packet) = self.push(deserialized_packet) { + if let Some(_dropped_packet) = self.push(deserialized_packet) { num_dropped_packets += 1; - if dropped_packet - .immutable_section() - .original_packet() - .meta() - .is_tracer_packet() - { - num_dropped_tracer_packets += 1; - } } } PacketBatchInsertionMetrics { num_dropped_packets, - num_dropped_tracer_packets, } } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index e86780002ea694..024cca9b026a5e 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -93,13 +93,6 @@ impl InsertPacketBatchSummary { _ => 0, } } - - pub fn dropped_tracer_packets(&self) -> usize { - match self { - Self::PacketBatchInsertionMetrics(metrics) => metrics.num_dropped_tracer_packets, - _ => 0, - } - } } impl From for InsertPacketBatchSummary { @@ -193,8 +186,12 @@ fn consume_scan_should_process_packet( // because the priority guard requires that we always take locks // except in the cases of discarding transactions (i.e. `Never`). 
if payload.account_locks.check_locks(message) - && Consumer::check_fee_payer_unlocked(bank, message, &mut payload.error_counters) - .is_err() + && Consumer::check_fee_payer_unlocked( + bank, + &sanitized_transaction, + &mut payload.error_counters, + ) + .is_err() { payload .message_hash_to_transaction @@ -638,8 +635,6 @@ impl ThreadLocalUnprocessedPackets { bank: Arc, forward_buffer: &mut ForwardPacketBatchesByAccounts, ) -> FilterForwardingResults { - let mut total_forwardable_tracer_packets: usize = 0; - let mut total_tracer_packets_in_buffer: usize = 0; let mut total_forwardable_packets: usize = 0; let mut total_packet_conversion_us: u64 = 0; let mut total_filter_packets_us: u64 = 0; @@ -660,11 +655,8 @@ impl ThreadLocalUnprocessedPackets { .into_iter() .flat_map(|packets_to_process| { // Only process packets not yet forwarded - let (forwarded_packets, packets_to_forward, is_tracer_packet) = self - .prepare_packets_to_forward( - packets_to_process, - &mut total_tracer_packets_in_buffer, - ); + let (forwarded_packets, packets_to_forward) = + self.prepare_packets_to_forward(packets_to_process); [ forwarded_packets, @@ -689,15 +681,10 @@ impl ThreadLocalUnprocessedPackets { &mut total_dropped_packets )); saturating_add_assign!(total_filter_packets_us, filter_packets_us); - - for forwardable_transaction_index in &forwardable_transaction_indexes { - saturating_add_assign!(total_forwardable_packets, 1); - let forwardable_packet_index = - transaction_to_packet_indexes[*forwardable_transaction_index]; - if is_tracer_packet[forwardable_packet_index] { - saturating_add_assign!(total_forwardable_tracer_packets, 1); - } - } + saturating_add_assign!( + total_forwardable_packets, + forwardable_transaction_indexes.len() + ); let accepted_packet_indexes = Self::add_filtered_packets_to_forward_buffer( @@ -750,8 +737,6 @@ impl ThreadLocalUnprocessedPackets { FilterForwardingResults { total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, total_dropped_packets, total_packet_conversion_us, total_filter_packets_us, @@ -982,36 +967,27 @@ impl ThreadLocalUnprocessedPackets { fn prepare_packets_to_forward( &self, packets_to_forward: impl Iterator>, - total_tracer_packets_in_buffer: &mut usize, ) -> ( Vec>, Vec>, - Vec, ) { let mut forwarded_packets: Vec> = vec![]; - let (forwardable_packets, is_tracer_packet) = packets_to_forward + let forwardable_packets = packets_to_forward .into_iter() .filter_map(|immutable_deserialized_packet| { - let is_tracer_packet = immutable_deserialized_packet - .original_packet() - .meta() - .is_tracer_packet(); - if is_tracer_packet { - saturating_add_assign!(*total_tracer_packets_in_buffer, 1); - } if !self .unprocessed_packet_batches .is_forwarded(&immutable_deserialized_packet) { - Some((immutable_deserialized_packet, is_tracer_packet)) + Some(immutable_deserialized_packet) } else { forwarded_packets.push(immutable_deserialized_packet); None } }) - .unzip(); + .collect(); - (forwarded_packets, forwardable_packets, is_tracer_packet) + (forwarded_packets, forwardable_packets) } } @@ -1116,7 +1092,6 @@ mod tests { .map(|(packets_id, transaction)| { let mut p = Packet::from_data(None, transaction).unwrap(); p.meta_mut().port = packets_id as u16; - p.meta_mut().set_tracer(true); DeserializedPacket::new(p).unwrap() }) .collect_vec(); @@ -1134,16 +1109,12 @@ mod tests { let FilterForwardingResults { total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, .. 
} = transaction_storage.filter_forwardable_packets_and_add_batches( current_bank.clone(), &mut forward_packet_batches_by_accounts, ); assert_eq!(total_forwardable_packets, 256); - assert_eq!(total_tracer_packets_in_buffer, 256); - assert_eq!(total_forwardable_tracer_packets, 256); // packets in a batch are forwarded in arbitrary order; verify the ports match after // sorting @@ -1172,8 +1143,6 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); let FilterForwardingResults { total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, .. } = transaction_storage.filter_forwardable_packets_and_add_batches( current_bank.clone(), @@ -1183,11 +1152,6 @@ mod tests { total_forwardable_packets, packets.len() - num_already_forwarded ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_forwarded - ); } // some packets are invalid (already processed) @@ -1206,8 +1170,6 @@ mod tests { ForwardPacketBatchesByAccounts::new_with_default_batch_limits(); let FilterForwardingResults { total_forwardable_packets, - total_tracer_packets_in_buffer, - total_forwardable_tracer_packets, .. } = transaction_storage.filter_forwardable_packets_and_add_batches( current_bank, @@ -1217,11 +1179,6 @@ mod tests { total_forwardable_packets, packets.len() - num_already_processed ); - assert_eq!(total_tracer_packets_in_buffer, packets.len()); - assert_eq!( - total_forwardable_tracer_packets, - packets.len() - num_already_processed - ); } } @@ -1383,17 +1340,14 @@ mod tests { .map(|(packets_id, transaction)| { let mut p = Packet::from_data(None, transaction).unwrap(); p.meta_mut().port = packets_id as u16; - p.meta_mut().set_tracer(true); DeserializedPacket::new(p).unwrap() }) .collect_vec(); // test preparing buffered packets for forwarding let test_prepareing_buffered_packets_for_forwarding = - |buffered_packet_batches: UnprocessedPacketBatches| -> (usize, usize, usize) { - let mut total_tracer_packets_in_buffer: usize = 0; + |buffered_packet_batches: UnprocessedPacketBatches| -> usize { let mut total_packets_to_forward: usize = 0; - let mut total_tracer_packets_to_forward: usize = 0; let mut unprocessed_transactions = ThreadLocalUnprocessedPackets { unprocessed_packet_batches: buffered_packet_batches, @@ -1406,35 +1360,21 @@ mod tests { .chunks(128usize) .into_iter() .flat_map(|packets_to_process| { - let (_, packets_to_forward, is_tracer_packet) = unprocessed_transactions - .prepare_packets_to_forward( - packets_to_process, - &mut total_tracer_packets_in_buffer, - ); + let (_, packets_to_forward) = + unprocessed_transactions.prepare_packets_to_forward(packets_to_process); total_packets_to_forward += packets_to_forward.len(); - total_tracer_packets_to_forward += is_tracer_packet.len(); packets_to_forward }) .collect::>>(); - ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) + total_packets_to_forward }; - // all tracer packets are forwardable { let buffered_packet_batches: UnprocessedPacketBatches = UnprocessedPacketBatches::from_iter(packets.clone(), packets.len()); - let ( - total_tracer_packets_in_buffer, - total_packets_to_forward, - total_tracer_packets_to_forward, - ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches); - assert_eq!(total_tracer_packets_in_buffer, 256); + let total_packets_to_forward = + test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches); 
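// The slimmed-down `prepare_packets_to_forward` above is now a plain
// partition: packets already marked as forwarded are set aside, everything
// else stays forwardable. The same pattern in a self-contained, generic form
// (a sketch; `split_already_forwarded` is not a real banking-stage helper):
fn split_already_forwarded<T>(
    packets: impl IntoIterator<Item = T>,
    is_forwarded: impl Fn(&T) -> bool,
) -> (Vec<T>, Vec<T>) {
    let mut forwarded = Vec::new();
    let forwardable = packets
        .into_iter()
        .filter_map(|packet| {
            if is_forwarded(&packet) {
                // Set aside: this packet was forwarded before and must not
                // be forwarded again.
                forwarded.push(packet);
                None
            } else {
                Some(packet)
            }
        })
        .collect();
    (forwarded, forwardable)
}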
             assert_eq!(total_packets_to_forward, 256);
-            assert_eq!(total_tracer_packets_to_forward, 256);
         }
 
         // some packets are forwarded
@@ -1445,14 +1385,9 @@ mod tests {
             }
             let buffered_packet_batches: UnprocessedPacketBatches =
                 UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
-            let (
-                total_tracer_packets_in_buffer,
-                total_packets_to_forward,
-                total_tracer_packets_to_forward,
-            ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches);
-            assert_eq!(total_tracer_packets_in_buffer, 256);
+            let total_packets_to_forward =
+                test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches);
             assert_eq!(total_packets_to_forward, 256 - num_already_forwarded);
-            assert_eq!(total_tracer_packets_to_forward, 256 - num_already_forwarded);
         }
 
         // all packets are forwarded
@@ -1462,14 +1397,9 @@ mod tests {
             }
             let buffered_packet_batches: UnprocessedPacketBatches =
                 UnprocessedPacketBatches::from_iter(packets.clone(), packets.len());
-            let (
-                total_tracer_packets_in_buffer,
-                total_packets_to_forward,
-                total_tracer_packets_to_forward,
-            ) = test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches);
-            assert_eq!(total_tracer_packets_in_buffer, 256);
+            let total_packets_to_forward =
+                test_prepareing_buffered_packets_for_forwarding(buffered_packet_batches);
             assert_eq!(total_packets_to_forward, 0);
-            assert_eq!(total_tracer_packets_to_forward, 0);
         }
     }
 }
diff --git a/core/src/banking_trace.rs b/core/src/banking_trace.rs
index 6e0797c8c3842f..a997f02843ac2e 100644
--- a/core/src/banking_trace.rs
+++ b/core/src/banking_trace.rs
@@ -1,5 +1,4 @@
 use {
-    crate::sigverify::SigverifyTracerPacketStats,
     bincode::serialize_into,
     chrono::{DateTime, Local},
     crossbeam_channel::{unbounded, Receiver, SendError, Sender, TryRecvError},
@@ -20,7 +19,7 @@ use {
     thiserror::Error,
 };
 
-pub type BankingPacketBatch = Arc<(Vec<PacketBatch>, Option<SigverifyTracerPacketStats>)>;
+pub type BankingPacketBatch = Arc<Vec<PacketBatch>>;
 pub type BankingPacketSender = TracedSender;
 pub type BankingPacketReceiver = Receiver<BankingPacketBatch>;
 pub type TracerThreadResult = Result<(), TraceError>;
@@ -62,11 +61,6 @@ pub struct BankingTracer {
     active_tracer: Option<ActiveTracer>,
 }
 
-#[cfg_attr(
-    feature = "frozen-abi",
-    derive(AbiExample),
-    frozen_abi(digest = "6PCDw6YSEivfbwhbPmE4NAsXb88ZX6hkFnruP8B38nma")
-)]
 #[derive(Serialize, Deserialize, Debug)]
 pub struct TimedTracedEvent(pub std::time::SystemTime, pub TracedEvent);
 
@@ -178,6 +172,15 @@ pub fn receiving_loop_with_minimized_sender_overhead(
     Ok(())
 }
 
+pub struct Channels {
+    pub non_vote_sender: BankingPacketSender,
+    pub non_vote_receiver: BankingPacketReceiver,
+    pub tpu_vote_sender: BankingPacketSender,
+    pub tpu_vote_receiver: BankingPacketReceiver,
+    pub gossip_vote_sender: BankingPacketSender,
+    pub gossip_vote_receiver: BankingPacketReceiver,
+}
+
 impl BankingTracer {
     pub fn new(
         maybe_config: Option<(&PathBuf, Arc<AtomicBool>, DirByteLimit)>,
@@ -220,22 +223,85 @@ impl BankingTracer {
         self.active_tracer.is_some()
     }
 
+    pub fn create_channels(&self, unify_channels: bool) -> Channels {
+        if unify_channels {
+            // Returning the same underlying channel is needed once unified scheduler supports
+            // block production, because unified scheduler doesn't distinguish the three sources
+            // and treats them as a single unified stream of incoming transactions. This reduces
+            // the number of recv operations per loop and helps balance load there as evenly as
+            // possible.
+            let (non_vote_sender, non_vote_receiver) = self.create_channel_non_vote();
+            // Tap into some private helper fns so that banking trace labelling works as before.
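// What "unified" means here, in a minimal self-contained sketch: the vote
// senders wrap clones of the same underlying channel as the non-vote sender,
// so one receiver drains all three labelled sources. Bare crossbeam channels
// and string payloads are used purely for illustration; the real code keeps
// the `TracedSender` wrapper so trace labelling still works.
use crossbeam_channel::unbounded;

fn main() {
    let (sender, receiver) = unbounded::<&str>();
    let (non_vote, tpu_vote, gossip_vote) = (sender.clone(), sender.clone(), sender);
    non_vote.send("non-vote packet").unwrap();
    tpu_vote.send("tpu-vote packet").unwrap();
    gossip_vote.send("gossip-vote packet").unwrap();
    // A single recv loop sees every source; no per-source polling is needed.
    assert_eq!(receiver.try_iter().count(), 3);
}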
+ let (tpu_vote_sender, tpu_vote_receiver) = + self.create_unified_channel_tpu_vote(&non_vote_sender, &non_vote_receiver); + let (gossip_vote_sender, gossip_vote_receiver) = + self.create_unified_channel_gossip_vote(&non_vote_sender, &non_vote_receiver); + + Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } + } else { + let (non_vote_sender, non_vote_receiver) = self.create_channel_non_vote(); + let (tpu_vote_sender, tpu_vote_receiver) = self.create_channel_tpu_vote(); + let (gossip_vote_sender, gossip_vote_receiver) = self.create_channel_gossip_vote(); + + Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } + } + } + fn create_channel(&self, label: ChannelLabel) -> (BankingPacketSender, BankingPacketReceiver) { Self::channel(label, self.active_tracer.as_ref().cloned()) } - pub fn create_channel_non_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { + fn create_channel_non_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { self.create_channel(ChannelLabel::NonVote) } - pub fn create_channel_tpu_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { + fn create_channel_tpu_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { self.create_channel(ChannelLabel::TpuVote) } - pub fn create_channel_gossip_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { + fn create_channel_gossip_vote(&self) -> (BankingPacketSender, BankingPacketReceiver) { self.create_channel(ChannelLabel::GossipVote) } + fn create_unified_channel_tpu_vote( + &self, + sender: &TracedSender, + receiver: &BankingPacketReceiver, + ) -> (BankingPacketSender, BankingPacketReceiver) { + Self::channel_inner( + ChannelLabel::TpuVote, + self.active_tracer.as_ref().cloned(), + sender.sender.clone(), + receiver.clone(), + ) + } + + fn create_unified_channel_gossip_vote( + &self, + sender: &TracedSender, + receiver: &BankingPacketReceiver, + ) -> (BankingPacketSender, BankingPacketReceiver) { + Self::channel_inner( + ChannelLabel::GossipVote, + self.active_tracer.as_ref().cloned(), + sender.sender.clone(), + receiver.clone(), + ) + } + pub fn hash_event(&self, slot: Slot, blockhash: &Hash, bank_hash: &Hash) { self.trace_event(|| { TimedTracedEvent( @@ -264,6 +330,15 @@ impl BankingTracer { active_tracer: Option, ) -> (TracedSender, Receiver) { let (sender, receiver) = unbounded(); + Self::channel_inner(label, active_tracer, sender, receiver) + } + + fn channel_inner( + label: ChannelLabel, + active_tracer: Option, + sender: Sender, + receiver: BankingPacketReceiver, + ) -> (TracedSender, Receiver) { (TracedSender::new(label, sender, active_tracer), receiver) } @@ -378,7 +453,7 @@ pub mod for_test { }; pub fn sample_packet_batch() -> BankingPacketBatch { - BankingPacketBatch::new((to_packet_batches(&vec![test_tx(); 4], 10), None)) + BankingPacketBatch::new(to_packet_batches(&vec![test_tx(); 4], 10)) } pub fn drop_and_clean_temp_dir_unless_suppressed(temp_dir: TempDir) { @@ -435,7 +510,7 @@ mod tests { }); non_vote_sender - .send(BankingPacketBatch::new((vec![], None))) + .send(BankingPacketBatch::new(vec![])) .unwrap(); for_test::terminate_tracer(tracer, None, dummy_main_thread, non_vote_sender, None); } diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 56869624812940..87ac964c3978a9 100644 --- a/core/src/cluster_info_vote_listener.rs +++ 
b/core/src/cluster_info_vote_listener.rs @@ -265,7 +265,7 @@ impl ClusterInfoVoteListener { if !votes.is_empty() { let (vote_txs, packets) = Self::verify_votes(votes, root_bank_cache); verified_vote_transactions_sender.send(vote_txs)?; - verified_packets_sender.send(BankingPacketBatch::new((packets, None)))?; + verified_packets_sender.send(BankingPacketBatch::new(packets))?; } sleep(Duration::from_millis(GOSSIP_SLEEP_MILLIS)); } diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index a84931d9692236..6b6512fded3a25 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -79,11 +79,12 @@ impl From for SavedTowerVersions { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "2XiuhmDfRzWGdwZdMbpH5NcjGnTRi9zY1XTNHSknddA7") + frozen_abi(digest = "GqJW8vVvSkSZwTJE6x6MFFhi7kcU6mqst8PF7493h2hk") )] #[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct SavedTower { signature: Signature, + #[serde(with = "serde_bytes")] data: Vec, #[serde(skip)] node_pubkey: Pubkey, diff --git a/core/src/lib.rs b/core/src/lib.rs index a7639993871fcb..c40eebdc09c4db 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -39,7 +39,6 @@ pub mod stats_reporter_service; pub mod system_monitor_service; pub mod tpu; mod tpu_entry_notifier; -pub mod tracer_packet_stats; pub mod tvu; pub mod unfrozen_gossip_verified_vote_hashes; pub mod validator; diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 4ca7f1bae080f4..97b8cd865adbc6 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -15,7 +15,6 @@ use { replay_stage::DUPLICATE_THRESHOLD, shred_fetch_stage::receive_quic_datagrams, }, - bincode::serialize, bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, dashmap::{mapref::entry::Entry::Occupied, DashMap}, @@ -454,11 +453,9 @@ impl AncestorHashesService { return None; } stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { - let pong = RepairProtocol::Pong(pong); - if let Ok(pong_bytes) = serialize(&pong) { - let _ignore = ancestor_socket.send_to(&pong_bytes[..], from_addr); - } + let pong = RepairProtocol::Pong(Pong::new(&ping, keypair)); + if let Ok(pong) = bincode::serialize(&pong) { + let _ = ancestor_socket.send_to(&pong, from_addr); } None } diff --git a/core/src/repair/quic_endpoint.rs b/core/src/repair/quic_endpoint.rs index e8085c522c8c2a..0c8cc54dffe629 100644 --- a/core/src/repair/quic_endpoint.rs +++ b/core/src/repair/quic_endpoint.rs @@ -17,7 +17,7 @@ use { solana_runtime::bank_forks::BankForks, solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_tls_utils::{ - new_dummy_x509_certificate, SkipClientVerification, SkipServerVerification, + new_dummy_x509_certificate, tls_client_config_builder, tls_server_config_builder, }, std::{ cmp::Reverse, @@ -300,9 +300,7 @@ fn new_server_config( cert: CertificateDer<'static>, key: PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ServerConfig::builder() - .with_client_cert_verifier(SkipClientVerification::new()) - .with_single_cert(vec![cert], key)?; + let mut config = tls_server_config_builder().with_single_cert(vec![cert], key)?; config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; config.key_log = Arc::new(KeyLogFile::new()); let Ok(config) = QuicServerConfig::try_from(config) else { @@ -321,10 +319,7 @@ fn new_client_config( cert: CertificateDer<'static>, key: 
PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(SkipServerVerification::new()) - .with_client_auth_cert(vec![cert], key)?; + let mut config = tls_client_config_builder().with_client_auth_cert(vec![cert], key)?; config.enable_early_data = true; config.alpn_protocols = vec![ALPN_REPAIR_PROTOCOL_ID.to_vec()]; let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(config).unwrap())); diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 1e6b7635a0f5a6..8271e670d9ace0 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -21,7 +21,7 @@ use { solana_gossip::{ cluster_info::{ClusterInfo, ClusterInfoError}, contact_info::{ContactInfo, Protocol}, - ping_pong::{self, PingCache, Pong}, + ping_pong::{self, Pong}, weighted_shuffle::WeightedShuffle, }, solana_ledger::{ @@ -81,7 +81,7 @@ pub const MAX_ANCESTOR_BYTES_IN_PACKET: usize = pub const MAX_ANCESTOR_RESPONSES: usize = MAX_ANCESTOR_BYTES_IN_PACKET / std::mem::size_of::<(Slot, Hash)>(); /// Number of bytes in the randomly generated token sent with ping messages. -pub(crate) const REPAIR_PING_TOKEN_SIZE: usize = HASH_BYTES; +const REPAIR_PING_TOKEN_SIZE: usize = HASH_BYTES; pub const REPAIR_PING_CACHE_CAPACITY: usize = 65536; pub const REPAIR_PING_CACHE_TTL: Duration = Duration::from_secs(1280); const REPAIR_PING_CACHE_RATE_LIMIT_DELAY: Duration = Duration::from_secs(2); @@ -141,11 +141,6 @@ impl AncestorHashesRepairType { } } -#[cfg_attr( - feature = "frozen-abi", - derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "GPS6e6pgUdbXLwXN6XHTqrUVMwAL2YKLPDawgMi5hHzi") -)] #[derive(Debug, Deserialize, Serialize)] pub enum AncestorHashesResponse { Hashes(Vec<(Slot, Hash)>), @@ -219,7 +214,8 @@ impl RepairRequestHeader { } } -pub(crate) type Ping = ping_pong::Ping<[u8; REPAIR_PING_TOKEN_SIZE]>; +type Ping = ping_pong::Ping; +type PingCache = ping_pong::PingCache; /// Window protocol messages #[cfg_attr( @@ -270,11 +266,6 @@ fn discard_malformed_repair_requests( requests.len() } -#[cfg_attr( - feature = "frozen-abi", - derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "9A6ae44qpdT7PaxiDZbybMM2mewnSnPs3C4CxhpbbYuV") -)] #[derive(Debug, Deserialize, Serialize)] pub(crate) enum RepairResponse { Ping(Ping), @@ -824,6 +815,8 @@ impl ServeRepair { assert!(REPAIR_PING_CACHE_RATE_LIMIT_DELAY > Duration::from_millis(REPAIR_MS)); let mut ping_cache = PingCache::new( + &mut rand::thread_rng(), + Instant::now(), REPAIR_PING_CACHE_TTL, REPAIR_PING_CACHE_RATE_LIMIT_DELAY, REPAIR_PING_CACHE_CAPACITY, @@ -924,10 +917,16 @@ impl ServeRepair { identity_keypair: &Keypair, ) -> (bool, Option) { let mut rng = rand::thread_rng(); - let mut pingf = move || Ping::new_rand(&mut rng, identity_keypair).ok(); let (check, ping) = request .sender() - .map(|&sender| ping_cache.check(Instant::now(), (sender, *from_addr), &mut pingf)) + .map(|&sender| { + ping_cache.check( + &mut rng, + identity_keypair, + Instant::now(), + (sender, *from_addr), + ) + }) .unwrap_or_default(); let ping_pkt = if let Some(ping) = ping { match request { @@ -1232,12 +1231,10 @@ impl ServeRepair { } packet.meta_mut().set_discard(true); stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { - let pong = RepairProtocol::Pong(pong); - if let Ok(pong_bytes) = serialize(&pong) { - let from_addr = packet.meta().socket_addr(); - pending_pongs.push((pong_bytes, from_addr)); - } + let pong = 
RepairProtocol::Pong(Pong::new(&ping, keypair)); + if let Ok(pong) = bincode::serialize(&pong) { + let from_addr = packet.meta().socket_addr(); + pending_pongs.push((pong, from_addr)); } } } @@ -1462,7 +1459,7 @@ mod tests { fn test_serialized_ping_size() { let mut rng = rand::thread_rng(); let keypair = Keypair::new(); - let ping = Ping::new_rand(&mut rng, &keypair).unwrap(); + let ping = Ping::new(rng.gen(), &keypair); let ping = RepairResponse::Ping(ping); let pkt = Packet::from_data(None, ping).unwrap(); assert_eq!(pkt.meta().size, REPAIR_RESPONSE_SERIALIZED_PING_BYTES); @@ -1516,8 +1513,8 @@ mod tests { fn test_check_well_formed_repair_request() { let mut rng = rand::thread_rng(); let keypair = Keypair::new(); - let ping = ping_pong::Ping::<[u8; 32]>::new_rand(&mut rng, &keypair).unwrap(); - let pong = Pong::new(&ping, &keypair).unwrap(); + let ping = Ping::new(rng.gen(), &keypair); + let pong = Pong::new(&ping, &keypair); let request = RepairProtocol::Pong(pong); let mut pkt = Packet::from_data(None, request).unwrap(); let mut batch = vec![make_remote_request(&pkt)]; diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs index 18984ecc4ef836..61da8cf9ef70dd 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -13,51 +13,10 @@ use { sigverify_stage::{SigVerifier, SigVerifyServiceError}, }, solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify}, - solana_sdk::{packet::Packet, saturating_add_assign}, }; -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SigverifyTracerPacketStats { - pub total_removed_before_sigverify_stage: usize, - pub total_tracer_packets_received_in_sigverify_stage: usize, - pub total_tracer_packets_deduped: usize, - pub total_excess_tracer_packets: usize, - pub total_tracker_packets_passed_sigverify: usize, -} - -impl SigverifyTracerPacketStats { - pub fn is_default(&self) -> bool { - *self == SigverifyTracerPacketStats::default() - } - - pub fn aggregate(&mut self, other: &SigverifyTracerPacketStats) { - saturating_add_assign!( - self.total_removed_before_sigverify_stage, - other.total_removed_before_sigverify_stage - ); - saturating_add_assign!( - self.total_tracer_packets_received_in_sigverify_stage, - other.total_tracer_packets_received_in_sigverify_stage - ); - saturating_add_assign!( - self.total_tracer_packets_deduped, - other.total_tracer_packets_deduped - ); - saturating_add_assign!( - self.total_excess_tracer_packets, - other.total_excess_tracer_packets - ); - saturating_add_assign!( - self.total_tracker_packets_passed_sigverify, - other.total_tracker_packets_passed_sigverify - ); - } -} - pub struct TransactionSigVerifier { packet_sender: BankingPacketSender, - tracer_packet_stats: SigverifyTracerPacketStats, recycler: Recycler, recycler_out: Recycler>, reject_non_vote: bool, @@ -74,7 +33,6 @@ impl TransactionSigVerifier { init(); Self { packet_sender, - tracer_packet_stats: SigverifyTracerPacketStats::default(), recycler: Recycler::warmed(50, 4096), recycler_out: Recycler::warmed(50, 4096), reject_non_vote: false, @@ -85,52 +43,12 @@ impl TransactionSigVerifier { impl SigVerifier for TransactionSigVerifier { type SendType = BankingPacketBatch; - #[inline(always)] - fn process_received_packet( - &mut self, - packet: &mut Packet, - removed_before_sigverify_stage: bool, - is_dup: bool, - ) { - sigverify::check_for_tracer_packet(packet); - if packet.meta().is_tracer_packet() { - if removed_before_sigverify_stage { - 
self.tracer_packet_stats - .total_removed_before_sigverify_stage += 1; - } else { - self.tracer_packet_stats - .total_tracer_packets_received_in_sigverify_stage += 1; - if is_dup { - self.tracer_packet_stats.total_tracer_packets_deduped += 1; - } - } - } - } - - #[inline(always)] - fn process_excess_packet(&mut self, packet: &Packet) { - if packet.meta().is_tracer_packet() { - self.tracer_packet_stats.total_excess_tracer_packets += 1; - } - } - - #[inline(always)] - fn process_passed_sigverify_packet(&mut self, packet: &Packet) { - if packet.meta().is_tracer_packet() { - self.tracer_packet_stats - .total_tracker_packets_passed_sigverify += 1; - } - } - fn send_packets( &mut self, packet_batches: Vec, ) -> Result<(), SigVerifyServiceError> { - let tracer_packet_stats_to_send = std::mem::take(&mut self.tracer_packet_stats); - self.packet_sender.send(BankingPacketBatch::new(( - packet_batches, - Some(tracer_packet_stats_to_send), - )))?; + self.packet_sender + .send(BankingPacketBatch::new(packet_batches))?; Ok(()) } diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index ac7d9889db0ed8..a59df5ae36fea3 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -13,7 +13,7 @@ use { solana_measure::measure::Measure, solana_perf::{ deduper::{self, Deduper}, - packet::{Packet, PacketBatch}, + packet::PacketBatch, sigverify::{ count_discarded_packets, count_packets_in_batches, count_valid_packets, shrink_batches, }, @@ -57,15 +57,6 @@ pub struct SigVerifyStage { pub trait SigVerifier { type SendType: std::fmt::Debug; fn verify_batches(&self, batches: Vec, valid_packets: usize) -> Vec; - fn process_received_packet( - &mut self, - _packet: &mut Packet, - _removed_before_sigverify_stage: bool, - _is_dup: bool, - ) { - } - fn process_excess_packet(&mut self, _packet: &Packet) {} - fn process_passed_sigverify_packet(&mut self, _packet: &Packet) {} fn send_packets(&mut self, packet_batches: Vec) -> Result<(), Self::SendType>; } @@ -251,11 +242,7 @@ impl SigVerifyStage { Self { thread_hdl } } - pub fn discard_excess_packets( - batches: &mut [PacketBatch], - mut max_packets: usize, - mut process_excess_packet: impl FnMut(&Packet), - ) { + pub fn discard_excess_packets(batches: &mut [PacketBatch], mut max_packets: usize) { // Group packets by their incoming IP address. let mut addrs = batches .iter_mut() @@ -276,7 +263,6 @@ impl SigVerifyStage { } // Discard excess packets from each address. 
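// `discard_excess_packets` caps the post-dedup load at `max_packets` while
// staying fair across source addresses. A rough self-contained sketch of that
// policy, under the assumption (from the surrounding code) that packets are
// taken round-robin per address until the cap is hit and the remainder is
// discarded; the real code flips `meta.discard` instead of returning vectors:
use std::collections::HashMap;

fn cap_fairly(mut by_addr: HashMap<String, Vec<u64>>, max_packets: usize) -> (Vec<u64>, Vec<u64>) {
    let mut kept = Vec::new();
    while kept.len() < max_packets && by_addr.values().any(|pkts| !pkts.is_empty()) {
        // One packet per address per round, so no single IP monopolizes the cap.
        by_addr.retain(|_addr, pkts| {
            if kept.len() < max_packets {
                if let Some(packet) = pkts.pop() {
                    kept.push(packet);
                }
            }
            !pkts.is_empty()
        });
    }
    let discarded = by_addr.into_values().flatten().collect();
    (kept, discarded)
}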
for packet in addrs.into_values().flatten() { - process_excess_packet(packet); packet.meta_mut().set_discard(true); } } @@ -322,30 +308,15 @@ impl SigVerifyStage { discard_random_time.stop(); let mut dedup_time = Measure::start("sigverify_dedup_time"); - let discard_or_dedup_fail = deduper::dedup_packets_and_count_discards( - deduper, - &mut batches, - #[inline(always)] - |received_packet, removed_before_sigverify_stage, is_dup| { - verifier.process_received_packet( - received_packet, - removed_before_sigverify_stage, - is_dup, - ); - }, - ) as usize; + let discard_or_dedup_fail = + deduper::dedup_packets_and_count_discards(deduper, &mut batches) as usize; dedup_time.stop(); let num_unique = non_discarded_packets.saturating_sub(discard_or_dedup_fail); let mut discard_time = Measure::start("sigverify_discard_time"); let mut num_packets_to_verify = num_unique; if num_unique > MAX_SIGVERIFY_BATCH { - Self::discard_excess_packets( - &mut batches, - MAX_SIGVERIFY_BATCH, - #[inline(always)] - |excess_packet| verifier.process_excess_packet(excess_packet), - ); + Self::discard_excess_packets(&mut batches, MAX_SIGVERIFY_BATCH); num_packets_to_verify = MAX_SIGVERIFY_BATCH; } let excess_fail = num_unique.saturating_sub(MAX_SIGVERIFY_BATCH); @@ -356,11 +327,7 @@ impl SigVerifyStage { let mut verify_time = Measure::start("sigverify_batch_time"); let mut batches = verifier.verify_batches(batches, num_packets_to_verify); - let num_valid_packets = count_valid_packets( - &batches, - #[inline(always)] - |valid_packet| verifier.process_passed_sigverify_packet(valid_packet), - ); + let num_valid_packets = count_valid_packets(&batches); verify_time.stop(); // Post-shrink packet batches if many packets are discarded from sigverify @@ -472,7 +439,6 @@ mod tests { packet::{to_packet_batches, Packet}, test_tx::test_tx, }, - solana_sdk::packet::PacketFlags, }; fn count_non_discard(packet_batches: &[PacketBatch]) -> usize { @@ -488,31 +454,15 @@ mod tests { solana_logger::setup(); let batch_size = 10; let mut batch = PacketBatch::with_capacity(batch_size); - let mut tracer_packet = Packet::default(); - tracer_packet.meta_mut().flags |= PacketFlags::TRACER_PACKET; - batch.resize(batch_size, tracer_packet); + let packet = Packet::default(); + batch.resize(batch_size, packet); batch[3].meta_mut().addr = std::net::IpAddr::from([1u16; 8]); batch[3].meta_mut().set_discard(true); - let num_discarded_before_filter = 1; batch[4].meta_mut().addr = std::net::IpAddr::from([2u16; 8]); - let total_num_packets = batch.len(); let mut batches = vec![batch]; let max = 3; - let mut total_tracer_packets_discarded = 0; - SigVerifyStage::discard_excess_packets(&mut batches, max, |packet| { - if packet.meta().is_tracer_packet() { - total_tracer_packets_discarded += 1; - } - }); + SigVerifyStage::discard_excess_packets(&mut batches, max); let total_non_discard = count_non_discard(&batches); - let total_discarded = total_num_packets - total_non_discard; - // Every packet except the packets already marked `discard` before the call - // to `discard_excess_packets()` should count towards the - // `total_tracer_packets_discarded` - assert_eq!( - total_tracer_packets_discarded, - total_discarded - num_discarded_before_filter - ); assert_eq!(total_non_discard, max); assert!(!batches[0][0].meta().discard()); assert!(batches[0][3].meta().discard()); @@ -565,69 +515,38 @@ mod tests { ); let mut sent_len = 0; - for mut batch in batches.into_iter() { + for batch in batches.into_iter() { sent_len += batch.len(); - batch - .iter_mut() - 
.for_each(|packet| packet.meta_mut().flags |= PacketFlags::TRACER_PACKET); assert_eq!(batch.len(), packets_per_batch); packet_s.send(batch).unwrap(); } - let mut received = 0; - let mut total_tracer_packets_received_in_sigverify_stage = 0; + let mut packet_s = Some(packet_s); + let mut valid_received = 0; trace!("sent: {}", sent_len); loop { - if let Ok(message) = verified_r.recv() { - let (verifieds, tracer_packet_stats) = (&message.0, message.1.as_ref().unwrap()); - total_tracer_packets_received_in_sigverify_stage += - tracer_packet_stats.total_tracer_packets_received_in_sigverify_stage; - assert_eq!( - tracer_packet_stats.total_tracer_packets_received_in_sigverify_stage - % packets_per_batch, - 0, - ); - - if use_same_tx { - // Every transaction other than the very first one in the very first batch - // should be deduped. - - // Also have to account for the fact that deduper could be cleared periodically, - // in which case the first transaction in the next batch won't be deduped - assert!( - (tracer_packet_stats.total_tracer_packets_deduped - == tracer_packet_stats - .total_tracer_packets_received_in_sigverify_stage - - 1) - || (tracer_packet_stats.total_tracer_packets_deduped - == tracer_packet_stats - .total_tracer_packets_received_in_sigverify_stage) - ); - assert!( - (tracer_packet_stats.total_tracker_packets_passed_sigverify == 1) - || (tracer_packet_stats.total_tracker_packets_passed_sigverify == 0) - ); - } else { - assert_eq!(tracer_packet_stats.total_tracer_packets_deduped, 0); - assert!( - (tracer_packet_stats.total_tracker_packets_passed_sigverify - == tracer_packet_stats - .total_tracer_packets_received_in_sigverify_stage) - ); - } - assert_eq!(tracer_packet_stats.total_excess_tracer_packets, 0); - received += verifieds.iter().map(|batch| batch.len()).sum::(); + if let Ok(verifieds) = verified_r.recv() { + valid_received += verifieds + .iter() + .map(|batch| batch.iter().filter(|p| !p.meta().discard()).count()) + .sum::(); + } else { + break; } - if total_tracer_packets_received_in_sigverify_stage >= sent_len { - break; + // Check if all the sent batches have been picked up by sigverify stage. + // Drop sender to exit the loop on next receive call, once the channel is + // drained. 
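// The rewritten test drives shutdown through an `Option<Sender>`: once the
// channel reports empty, the sender is dropped, so the next `recv()` returns
// `Err` and the loop exits, instead of relying on tracer-packet totals. The
// same pattern in isolation (crossbeam channels, illustrative payloads):
use crossbeam_channel::unbounded;

fn main() {
    let (sender, receiver) = unbounded::<u64>();
    for i in 0..3 {
        sender.send(i).unwrap();
    }
    let mut sender = Some(sender);
    let mut received = 0;
    loop {
        match receiver.recv() {
            Ok(_) => received += 1,
            Err(_) => break, // all senders dropped and the channel is drained
        }
        if sender.as_ref().map(|s| s.is_empty()).unwrap_or(true) {
            // Everything sent has been picked up: drop the sender so the
            // next recv() can error out.
            sender.take();
        }
    }
    assert_eq!(received, 3);
}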
+ if packet_s.as_ref().map(|s| s.is_empty()).unwrap_or(true) { + packet_s.take(); } } - trace!("received: {}", received); - assert_eq!( - total_tracer_packets_received_in_sigverify_stage, - total_packets - ); - drop(packet_s); + trace!("received: {}", valid_received); + + if use_same_tx { + assert_eq!(valid_received, 1); + } else { + assert_eq!(valid_received, total_packets); + } stage.join().unwrap(); } diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 091a5901c2311e..d715bb5c7b0534 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -5,7 +5,7 @@ pub use solana_sdk::net::DEFAULT_TPU_COALESCE; use { crate::{ banking_stage::BankingStage, - banking_trace::{BankingTracer, TracerThread}, + banking_trace::{BankingTracer, Channels, TracerThread}, cluster_info_vote_listener::{ ClusterInfoVoteListener, DuplicateConfirmedSlotsSender, GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker, @@ -156,7 +156,14 @@ impl Tpu { shared_staked_nodes_overrides, ); - let (non_vote_sender, non_vote_receiver) = banking_tracer.create_channel_non_vote(); + let Channels { + non_vote_sender, + non_vote_receiver, + tpu_vote_sender, + tpu_vote_receiver, + gossip_vote_sender, + gossip_vote_receiver, + } = banking_tracer.create_channels(false); // Streamer for Votes: let SpawnServerResult { @@ -235,8 +242,6 @@ impl Tpu { SigVerifyStage::new(packet_receiver, verifier, "solSigVerTpu", "tpu-verifier") }; - let (tpu_vote_sender, tpu_vote_receiver) = banking_tracer.create_channel_tpu_vote(); - let vote_sigverify_stage = { let verifier = TransactionSigVerifier::new_reject_non_vote(tpu_vote_sender); SigVerifyStage::new( @@ -247,8 +252,6 @@ impl Tpu { ) }; - let (gossip_vote_sender, gossip_vote_receiver) = - banking_tracer.create_channel_gossip_vote(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( exit.clone(), cluster_info.clone(), diff --git a/core/src/tracer_packet_stats.rs b/core/src/tracer_packet_stats.rs deleted file mode 100644 index 2269b35cc702fb..00000000000000 --- a/core/src/tracer_packet_stats.rs +++ /dev/null @@ -1,214 +0,0 @@ -use { - crate::sigverify::SigverifyTracerPacketStats, - solana_sdk::{pubkey::Pubkey, saturating_add_assign, timing::timestamp}, - std::collections::HashSet, -}; - -#[derive(Debug, Default)] -pub struct BankingStageTracerPacketStats { - total_exceeded_banking_stage_buffer: usize, - // This is the total number of tracer packets removed from the buffer - // after a leader's set of slots. 
Of these, only a subset that were in - // the buffer were actually forwardable (didn't arrive on forward port and haven't been - // forwarded before) - total_cleared_from_buffer_after_forward: usize, - total_forwardable_tracer_packets: usize, - forward_target_leaders: HashSet, -} - -#[derive(Debug, Default)] -pub struct ModifiableTracerPacketStats { - sigverify_tracer_packet_stats: SigverifyTracerPacketStats, - banking_stage_tracer_packet_stats: BankingStageTracerPacketStats, -} - -#[derive(Debug, Default)] -pub struct TracerPacketStats { - id: String, - last_report: u64, - modifiable_tracer_packet_stats: Option, -} - -impl TracerPacketStats { - pub fn new(id: u32) -> Self { - Self { - id: id.to_string(), - ..Self::default() - } - } - - fn reset(id: String) -> Self { - Self { - id, - ..Self::default() - } - } - - pub fn get_mutable_stats(&mut self) -> &mut ModifiableTracerPacketStats { - if self.modifiable_tracer_packet_stats.is_none() { - self.modifiable_tracer_packet_stats = Some(ModifiableTracerPacketStats::default()); - } - self.modifiable_tracer_packet_stats.as_mut().unwrap() - } - - pub fn aggregate_sigverify_tracer_packet_stats( - &mut self, - new_sigverify_stats: &SigverifyTracerPacketStats, - ) { - if !new_sigverify_stats.is_default() { - let stats = self.get_mutable_stats(); - stats - .sigverify_tracer_packet_stats - .aggregate(new_sigverify_stats); - } - } - - pub fn increment_total_exceeded_banking_stage_buffer( - &mut self, - total_exceeded_banking_stage_buffer: usize, - ) { - if total_exceeded_banking_stage_buffer != 0 { - let stats = self.get_mutable_stats(); - saturating_add_assign!( - stats - .banking_stage_tracer_packet_stats - .total_exceeded_banking_stage_buffer, - total_exceeded_banking_stage_buffer - ); - } - } - - pub fn increment_total_cleared_from_buffer_after_forward( - &mut self, - total_cleared_from_buffer_after_forward: usize, - ) { - if total_cleared_from_buffer_after_forward != 0 { - let stats = self.get_mutable_stats(); - saturating_add_assign!( - stats - .banking_stage_tracer_packet_stats - .total_cleared_from_buffer_after_forward, - total_cleared_from_buffer_after_forward - ); - } - } - - pub fn increment_total_forwardable_tracer_packets( - &mut self, - total_forwardable_tracer_packets: usize, - forward_target_leader: Pubkey, - ) { - if total_forwardable_tracer_packets != 0 { - let stats = self.get_mutable_stats(); - stats - .banking_stage_tracer_packet_stats - .forward_target_leaders - .insert(forward_target_leader); - saturating_add_assign!( - stats - .banking_stage_tracer_packet_stats - .total_forwardable_tracer_packets, - total_forwardable_tracer_packets - ); - } - } - - pub fn report(&mut self, report_interval_ms: u64) { - let now = timestamp(); - const LEADER_REPORT_LIMIT: usize = 4; - if now.saturating_sub(self.last_report) > report_interval_ms { - // We don't want to report unless we actually saw/forwarded a tracer packet - // to prevent noisy metrics - if let Some(modifiable_tracer_packet_stats) = self.modifiable_tracer_packet_stats.take() - { - datapoint_info!( - "tracer-packet-stats", - "id" => &self.id, - ( - "total_removed_before_sigverify", - modifiable_tracer_packet_stats - .sigverify_tracer_packet_stats - .total_removed_before_sigverify_stage as i64, - i64 - ), - ( - "total_tracer_packets_received_in_sigverify", - modifiable_tracer_packet_stats - .sigverify_tracer_packet_stats - .total_tracer_packets_received_in_sigverify_stage - as i64, - i64 - ), - ( - "total_tracer_packets_deduped_in_sigverify", - modifiable_tracer_packet_stats - 
.sigverify_tracer_packet_stats - .total_tracer_packets_deduped as i64, - i64 - ), - ( - "total_excess_tracer_packets_discarded_in_sigverify", - modifiable_tracer_packet_stats - .sigverify_tracer_packet_stats - .total_excess_tracer_packets as i64, - i64 - ), - ( - "total_tracker_packets_passed_sigverify", - modifiable_tracer_packet_stats - .sigverify_tracer_packet_stats - .total_tracker_packets_passed_sigverify as i64, - i64 - ), - ( - "total_exceeded_banking_stage_buffer", - modifiable_tracer_packet_stats - .banking_stage_tracer_packet_stats - .total_exceeded_banking_stage_buffer as i64, - i64 - ), - ( - "total_cleared_from_buffer_after_forward", - modifiable_tracer_packet_stats - .banking_stage_tracer_packet_stats - .total_cleared_from_buffer_after_forward as i64, - i64 - ), - ( - "total_forwardable_tracer_packets", - modifiable_tracer_packet_stats - .banking_stage_tracer_packet_stats - .total_forwardable_tracer_packets as i64, - i64 - ), - ( - "exceeded_expected_forward_leader_count", - modifiable_tracer_packet_stats - .banking_stage_tracer_packet_stats - .forward_target_leaders - .len() - > LEADER_REPORT_LIMIT, - bool - ), - ( - "forward_target_leaders", - itertools::Itertools::intersperse( - modifiable_tracer_packet_stats - .banking_stage_tracer_packet_stats - .forward_target_leaders - .iter() - .take(LEADER_REPORT_LIMIT) - .map(|leader_pubkey| leader_pubkey.to_string()), - ", ".to_string() - ) - .collect::(), - String - ) - ); - - *self = Self::reset(self.id.clone()); - self.last_report = timestamp(); - } - } - } -} diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 66db11c0c57b87..e14a66971201fb 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -332,21 +332,16 @@ impl Tvu { cluster_info.clone(), poh_recorder.clone(), tower_storage, - vote_connection_cache, + vote_connection_cache.clone(), ); - let warm_quic_cache_service = connection_cache.and_then(|connection_cache| { - if connection_cache.use_quic() { - Some(WarmQuicCacheService::new( - connection_cache.clone(), - cluster_info.clone(), - poh_recorder.clone(), - exit.clone(), - )) - } else { - None - } - }); + let warm_quic_cache_service = create_cache_warmer_if_needed( + connection_cache, + vote_connection_cache, + cluster_info, + poh_recorder, + &exit, + ); let cost_update_service = CostUpdateService::new(blockstore.clone(), cost_update_receiver); @@ -417,6 +412,27 @@ impl Tvu { } } +fn create_cache_warmer_if_needed( + connection_cache: Option<&Arc>, + vote_connection_cache: Arc, + cluster_info: &Arc, + poh_recorder: &Arc>, + exit: &Arc, +) -> Option { + let tpu_connection_cache = connection_cache.filter(|cache| cache.use_quic()).cloned(); + let vote_connection_cache = Some(vote_connection_cache).filter(|cache| cache.use_quic()); + + (tpu_connection_cache.is_some() || vote_connection_cache.is_some()).then(|| { + WarmQuicCacheService::new( + tpu_connection_cache, + vote_connection_cache, + cluster_info.clone(), + poh_recorder.clone(), + exit.clone(), + ) + }) +} + #[cfg(test)] pub mod tests { use { diff --git a/core/src/validator.rs b/core/src/validator.rs index 08641e29cd1afb..c3318ee070f2bc 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -2509,7 +2509,7 @@ pub enum ValidatorError { )] PohTooSlow { mine: u64, target: u64 }, - #[error("shred version mistmatch: actual {actual}, expected {expected}")] + #[error("shred version mismatch: actual {actual}, expected {expected}")] ShredVersionMismatch { actual: u16, expected: u16 }, #[error(transparent)] diff --git a/core/src/warm_quic_cache_service.rs 
b/core/src/warm_quic_cache_service.rs index fd86e2af9f766f..13421c47a6b7d9 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -5,9 +5,14 @@ use { rand::{thread_rng, Rng}, solana_client::connection_cache::{ConnectionCache, Protocol}, solana_connection_cache::client_connection::ClientConnection as TpuConnection, - solana_gossip::cluster_info::ClusterInfo, + solana_gossip::{ + cluster_info::ClusterInfo, + contact_info::{ContactInfo, Error}, + }, solana_poh::poh_recorder::PohRecorder, + solana_sdk::pubkey::Pubkey, std::{ + net::SocketAddr, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -26,13 +31,43 @@ const CACHE_OFFSET_SLOT: i64 = 100; const CACHE_JITTER_SLOT: i64 = 20; impl WarmQuicCacheService { + fn warmup_connection( + cache: Option<&ConnectionCache>, + cluster_info: &ClusterInfo, + leader_pubkey: &Pubkey, + contact_info_selector: impl Fn(&ContactInfo) -> Result, + log_context: &str, + ) { + if let Some(connection_cache) = cache { + if let Some(Ok(addr)) = + cluster_info.lookup_contact_info(leader_pubkey, contact_info_selector) + { + let conn = connection_cache.get_connection(&addr); + if let Err(err) = conn.send_data(&[]) { + warn!( + "Failed to warmup QUIC connection to the leader {leader_pubkey:?} at {addr:?}, \ + Context: {log_context}, Error: {err:?}" + ); + } + } + } + } + pub fn new( - connection_cache: Arc, + tpu_connection_cache: Option>, + vote_connection_cache: Option>, cluster_info: Arc, poh_recorder: Arc>, exit: Arc, ) -> Self { - assert!(matches!(*connection_cache, ConnectionCache::Quic(_))); + assert!(matches!( + tpu_connection_cache.as_deref(), + None | Some(ConnectionCache::Quic(_)) + )); + assert!(matches!( + vote_connection_cache.as_deref(), + None | Some(ConnectionCache::Quic(_)) + )); let thread_hdl = Builder::new() .name("solWarmQuicSvc".to_string()) .spawn(move || { @@ -48,20 +83,22 @@ impl WarmQuicCacheService { .map_or(true, |last_leader| last_leader != leader_pubkey) { maybe_last_leader = Some(leader_pubkey); - if let Some(Ok(addr)) = cluster_info - .lookup_contact_info(&leader_pubkey, |node| { - node.tpu(Protocol::QUIC) - }) - { - let conn = connection_cache.get_connection(&addr); - if let Err(err) = conn.send_data(&[]) { - warn!( - "Failed to warmup QUIC connection to the leader {:?}, \ - Error {:?}", - leader_pubkey, err - ); - } - } + // Warm cache for regular transactions + Self::warmup_connection( + tpu_connection_cache.as_deref(), + &cluster_info, + &leader_pubkey, + |node| node.tpu(Protocol::QUIC), + "tpu", + ); + // Warm cache for vote + Self::warmup_connection( + vote_connection_cache.as_deref(), + &cluster_info, + &leader_pubkey, + |node| node.tpu_vote(Protocol::QUIC), + "vote", + ); } } sleep(Duration::from_millis(200)); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 0d2e0b75317597..ebd56f02864b62 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -313,6 +313,7 @@ where let (mut shreds, mut repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| { packets .par_iter() + .with_min_len(32) .flat_map_iter(|packets| packets.iter().filter_map(handle_packet)) .unzip() }); @@ -489,15 +490,19 @@ impl WindowService { let handle_error = || { inc_new_counter_error!("solana-window-insert-error", 1, 1); }; - let thread_pool = rayon::ThreadPoolBuilder::new() - .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("solWinInsert{i:02}")) - .build() - .unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); Builder::new() 
.name("solWinInsert".to_string()) .spawn(move || { + let thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(get_thread_count().min(8)) + // Use the current thread as one of the workers. This reduces overhead when the + // pool is used to process a small number of shreds, since they'll be processed + // directly on the current thread. + .use_current_thread() + .thread_name(|i| format!("solWinInsert{i:02}")) + .build() + .unwrap(); let handle_duplicate = |possible_duplicate_shred| { let _ = check_duplicate_sender.send(possible_duplicate_shred); }; diff --git a/core/tests/unified_scheduler.rs b/core/tests/unified_scheduler.rs index 75795f2f6c01ee..a6a1e1e19582f0 100644 --- a/core/tests/unified_scheduler.rs +++ b/core/tests/unified_scheduler.rs @@ -17,16 +17,13 @@ use { solana_ledger::genesis_utils::create_genesis_config, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, - genesis_utils::GenesisConfigInfo, prioritization_fee_cache::PrioritizationFeeCache, + genesis_utils::GenesisConfigInfo, installed_scheduler_pool::SchedulingContext, + prioritization_fee_cache::PrioritizationFeeCache, }, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, - solana_sdk::{ - hash::Hash, - pubkey::Pubkey, - system_transaction, - transaction::{Result, SanitizedTransaction}, - }, + solana_sdk::{hash::Hash, pubkey::Pubkey, system_transaction, transaction::Result}, solana_timings::ExecuteTimings, + solana_unified_scheduler_logic::Task, solana_unified_scheduler_pool::{ DefaultTaskHandler, HandlerContext, PooledScheduler, SchedulerPool, TaskHandler, }, @@ -48,9 +45,8 @@ fn test_scheduler_waited_by_drop_bank_service() { fn handle( result: &mut Result<()>, timings: &mut ExecuteTimings, - bank: &Arc, - transaction: &RuntimeTransaction, - index: usize, + scheduling_context: &SchedulingContext, + task: &Task, handler_context: &HandlerContext, ) { info!("Stalling at StallingHandler::handle()..."); @@ -59,7 +55,7 @@ fn test_scheduler_waited_by_drop_bank_service() { std::thread::sleep(std::time::Duration::from_secs(3)); info!("Now entering into DefaultTaskHandler::handle()..."); - DefaultTaskHandler::handle(result, timings, bank, transaction, index, handler_context); + DefaultTaskHandler::handle(result, timings, scheduling_context, task, handler_context); } } diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs index 7fc4b2fe670b39..912ee504f712fd 100644 --- a/cost-model/src/block_cost_limits.rs +++ b/cost-model/src/block_cost_limits.rs @@ -1,14 +1,6 @@ //! defines block cost related limits //! -/// Static configurations: -/// -/// Number of microseconds replaying a block should take, 400 millisecond block times -/// is currently publicly communicated on solana.com -pub const MAX_BLOCK_REPLAY_TIME_US: u64 = 400_000; -/// number of concurrent processes, -pub const MAX_CONCURRENCY: u64 = 4; - // Cluster data, method of collecting at https://github.com/solana-labs/solana/issues/19627 // Dashboard: https://metrics.solana.com/d/monitor-edge/cluster-telemetry?orgId=1 @@ -22,38 +14,42 @@ pub const SECP256K1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 223; pub const ED25519_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 76; /// Number of compute units for one ed25519 strict signature verification. pub const ED25519_VERIFY_STRICT_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 80; +/// Number of compute units for one secp256r1 signature verification. 
+pub const SECP256R1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 160; /// Number of compute units for one write lock pub const WRITE_LOCK_UNITS: u64 = COMPUTE_UNIT_TO_US_RATIO * 10; /// Number of data bytes per compute units pub const INSTRUCTION_DATA_BYTES_COST: u64 = 140 /*bytes per us*/ / COMPUTE_UNIT_TO_US_RATIO; -/// Statically computed data: -/// /// Number of compute units that a block is allowed. A block's compute units are /// accumulated by Transactions added to it; A transaction's compute units are /// calculated by cost_model, based on transaction's signatures, write locks, /// data size and built-in and SBF instructions. -pub const MAX_BLOCK_UNITS: u64 = - MAX_BLOCK_REPLAY_TIME_US * COMPUTE_UNIT_TO_US_RATIO * MAX_CONCURRENCY; - -#[cfg(test)] -static_assertions::const_assert_eq!(MAX_BLOCK_UNITS, 48_000_000); +pub const MAX_BLOCK_UNITS: u64 = 48_000_000; +pub const MAX_BLOCK_UNITS_SIMD_0207: u64 = 50_000_000; /// Number of compute units that a writable account in a block is allowed. The /// limit is to prevent too many transactions write to same account, therefore /// reduce block's parallelism. -pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_UNIT_TO_US_RATIO; - -#[cfg(test)] -static_assertions::const_assert_eq!(MAX_WRITABLE_ACCOUNT_UNITS, 12_000_000); +pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = 12_000_000; /// Number of compute units that a block can have for vote transactions, -/// sets at ~75% of MAX_BLOCK_UNITS to leave room for non-vote transactions -pub const MAX_VOTE_UNITS: u64 = (MAX_BLOCK_UNITS as f64 * 0.75_f64) as u64; - -#[cfg(test)] -static_assertions::const_assert_eq!(MAX_VOTE_UNITS, 36_000_000); +/// set to less than MAX_BLOCK_UNITS to leave room for non-vote transactions +pub const MAX_VOTE_UNITS: u64 = 36_000_000; /// The maximum allowed size, in bytes, that accounts data can grow, per block. /// This can also be thought of as the maximum size of new allocations per block. pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000; + +/// Return the block limits that will be used upon activation of SIMD-0207. +/// Returns as +/// (account_limit, block_limit, vote_limit) +// ^ Above order is used to be consistent with the order of +// `CostTracker::set_limits`. 
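// How the helper below is meant to be consumed at feature activation, as a
// sketch: the tuple order deliberately mirrors `CostTracker::set_limits`
// (account, block, vote). `cost_tracker` is assumed to be an initialized
// `CostTracker`; the method name comes from the adjacent comment.
let (account_limit, block_limit, vote_limit) = simd_0207_block_limits();
cost_tracker.set_limits(account_limit, block_limit, vote_limit);
// After SIMD-0207 activates, blocks may pack 50M CUs instead of 48M.
assert_eq!(block_limit, MAX_BLOCK_UNITS_SIMD_0207);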
+pub const fn simd_0207_block_limits() -> (u64, u64, u64) {
+    (
+        MAX_WRITABLE_ACCOUNT_UNITS,
+        MAX_BLOCK_UNITS_SIMD_0207,
+        MAX_VOTE_UNITS,
+    )
+}
diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs
index 612492b8b1126f..3f2a16b8ccba4c 100644
--- a/cost-model/src/cost_model.rs
+++ b/cost-model/src/cost_model.rs
@@ -12,7 +12,9 @@ use {
         DEFAULT_HEAP_COST, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT,
     },
     solana_feature_set::{self as feature_set, FeatureSet},
-    solana_runtime_transaction::transaction_with_meta::TransactionWithMeta,
+    solana_runtime_transaction::{
+        transaction_meta::StaticMeta, transaction_with_meta::TransactionWithMeta,
+    },
     solana_sdk::{
         borsh1::try_from_slice_unchecked,
         compute_budget::{self, ComputeBudgetInstruction},
@@ -46,24 +48,22 @@ impl CostModel {
         if transaction.is_simple_vote_transaction() {
             TransactionCost::SimpleVote { transaction }
         } else {
-            let signature_cost = Self::get_signature_cost(transaction, feature_set);
-            let write_lock_cost = Self::get_write_lock_cost(transaction, feature_set);
+            let num_write_locks = Self::num_write_locks(transaction, feature_set);
             let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) =
-                Self::get_transaction_cost(transaction, feature_set);
-            let allocated_accounts_data_size =
-                Self::calculate_allocated_accounts_data_size(transaction);
-
-            let usage_cost_details = UsageCostDetails {
+                Self::get_transaction_cost(
+                    transaction,
+                    transaction.program_instructions_iter(),
+                    feature_set,
+                );
+            Self::calculate_non_vote_transaction_cost(
                 transaction,
-                signature_cost,
-                write_lock_cost,
-                data_bytes_cost,
+                transaction.program_instructions_iter(),
+                num_write_locks,
                 programs_execution_cost,
                 loaded_accounts_data_size_cost,
-                allocated_accounts_data_size,
-            };
-
-            TransactionCost::Transaction(usage_cost_details)
+                data_bytes_cost,
+                feature_set,
+            )
         }
     }
@@ -78,35 +78,82 @@ impl CostModel {
         if transaction.is_simple_vote_transaction() {
             TransactionCost::SimpleVote { transaction }
         } else {
-            let signature_cost = Self::get_signature_cost(transaction, feature_set);
-            let write_lock_cost = Self::get_write_lock_cost(transaction, feature_set);
-
-            let instructions_data_cost = Self::get_instructions_data_cost(transaction);
-            let allocated_accounts_data_size =
-                Self::calculate_allocated_accounts_data_size(transaction);
-
-            let programs_execution_cost = actual_programs_execution_cost;
+            let num_write_locks = Self::num_write_locks(transaction, feature_set);
             let loaded_accounts_data_size_cost = Self::calculate_loaded_accounts_data_size_cost(
                 actual_loaded_accounts_data_size_bytes,
                 feature_set,
             );
+            let instructions_data_cost =
+                Self::get_instructions_data_cost(transaction.program_instructions_iter());
 
-            let usage_cost_details = UsageCostDetails {
+            Self::calculate_non_vote_transaction_cost(
                 transaction,
-                signature_cost,
-                write_lock_cost,
-                data_bytes_cost: instructions_data_cost,
-                programs_execution_cost,
+                transaction.program_instructions_iter(),
+                num_write_locks,
+                actual_programs_execution_cost,
                 loaded_accounts_data_size_cost,
-                allocated_accounts_data_size,
-            };
+                instructions_data_cost,
+                feature_set,
+            )
+        }
+    }
 
-            TransactionCost::Transaction(usage_cost_details)
+    /// Return an estimated total cost for a transaction given its:
+    /// - `transaction` - transaction meta
+    /// - `instructions` - transaction instructions
+    /// - `num_write_locks` - number of requested write locks
+    pub fn estimate_cost<'a, Tx: StaticMeta>(
+        transaction: &'a Tx,
+        instructions: impl Iterator<Item = (&'a Pubkey, SVMInstruction<'a>)> + Clone,
+        num_write_locks: u64,
+        feature_set: &FeatureSet,
+    ) -> TransactionCost<'a, Tx> {
+        if transaction.is_simple_vote_transaction() {
+            return TransactionCost::SimpleVote { transaction };
         }
+        let (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) =
+            Self::get_transaction_cost(transaction, instructions.clone(), feature_set);
+        Self::calculate_non_vote_transaction_cost(
+            transaction,
+            instructions,
+            num_write_locks,
+            programs_execution_cost,
+            loaded_accounts_data_size_cost,
+            data_bytes_cost,
+            feature_set,
+        )
+    }
+
+    fn calculate_non_vote_transaction_cost<'a, Tx: StaticMeta>(
+        transaction: &'a Tx,
+        instructions: impl Iterator<Item = (&'a Pubkey, SVMInstruction<'a>)> + Clone,
+        num_write_locks: u64,
+        programs_execution_cost: u64,
+        loaded_accounts_data_size_cost: u64,
+        data_bytes_cost: u64,
+        feature_set: &FeatureSet,
+    ) -> TransactionCost<'a, Tx> {
+        let signature_cost = Self::get_signature_cost(transaction, feature_set);
+        let write_lock_cost = Self::get_write_lock_cost(num_write_locks);
+
+        let allocated_accounts_data_size =
+            Self::calculate_allocated_accounts_data_size(instructions);
+
+        let usage_cost_details = UsageCostDetails {
+            transaction,
+            signature_cost,
+            write_lock_cost,
+            data_bytes_cost,
+            programs_execution_cost,
+            loaded_accounts_data_size_cost,
+            allocated_accounts_data_size,
+        };
+
+        TransactionCost::Transaction(usage_cost_details)
     }

     /// Returns signature details and the total signature cost
-    fn get_signature_cost(transaction: &impl TransactionWithMeta, feature_set: &FeatureSet) -> u64 {
+    fn get_signature_cost(transaction: &impl StaticMeta, feature_set: &FeatureSet) -> u64 {
         let signatures_count_detail = transaction.signature_details();
         let ed25519_verify_cost =
@@ -116,6 +163,13 @@ impl CostModel {
             ED25519_VERIFY_COST
         };

+        let secp256r1_verify_cost =
+            if feature_set.is_active(&feature_set::enable_secp256r1_precompile::id()) {
+                SECP256R1_VERIFY_COST
+            } else {
+                0
+            };
+
         signatures_count_detail
             .num_transaction_signatures()
             .saturating_mul(SIGNATURE_COST)
@@ -129,6 +183,11 @@ impl CostModel {
                 .num_ed25519_instruction_signatures()
                 .saturating_mul(ed25519_verify_cost),
             )
+            .saturating_add(
+                signatures_count_detail
+                    .num_secp256r1_instruction_signatures()
+                    .saturating_mul(secp256r1_verify_cost),
+            )
     }

     fn get_writable_accounts(message: &impl SVMMessage) -> impl Iterator<Item = &Pubkey> {
@@ -139,38 +198,43 @@
             .filter_map(|(i, k)| message.is_writable(i).then_some(k))
     }

+    /// Return the number of write-locks for a transaction.
+    fn num_write_locks(transaction: &impl SVMMessage, feature_set: &FeatureSet) -> u64 {
+        if feature_set.is_active(&feature_set::cost_model_requested_write_lock_cost::id()) {
+            transaction.num_write_locks()
+        } else {
+            Self::get_writable_accounts(transaction).count() as u64
+        }
+    }
+
     /// Returns the total write-lock cost.
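Before moving on to the write-lock pricing below, the accumulation in `get_signature_cost` above is easiest to see with concrete numbers. The following is a minimal standalone sketch, not the crate's API: the constants mirror `block_cost_limits.rs` under the assumption that `COMPUTE_UNIT_TO_US_RATIO` is 30, the ed25519 strict-cost feature gate is elided, and all names are local to the example.

```rust
// Sketch of the per-signature cost accumulation (ratio of 30 is assumed).
const COMPUTE_UNIT_TO_US_RATIO: u64 = 30;
const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 24;
const SECP256K1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 223;
const ED25519_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 76;
const SECP256R1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 160;

fn signature_cost(
    num_transaction_signatures: u64,
    num_secp256k1_signatures: u64,
    num_ed25519_signatures: u64,
    num_secp256r1_signatures: u64,
    secp256r1_precompile_active: bool,
) -> u64 {
    // Before the enable_secp256r1_precompile feature activates, the new
    // precompile contributes nothing to the total.
    let secp256r1_verify_cost = if secp256r1_precompile_active {
        SECP256R1_VERIFY_COST
    } else {
        0
    };
    num_transaction_signatures
        .saturating_mul(SIGNATURE_COST)
        .saturating_add(num_secp256k1_signatures.saturating_mul(SECP256K1_VERIFY_COST))
        .saturating_add(num_ed25519_signatures.saturating_mul(ED25519_VERIFY_COST))
        .saturating_add(num_secp256r1_signatures.saturating_mul(secp256r1_verify_cost))
}
```

Under these assumed constants, a transaction with one fee-payer signature and one secp256r1 instruction signature would be charged 30 * 24 + 30 * 160 = 5,520 compute units once the feature is active, and only 720 before.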
- fn get_write_lock_cost(transaction: &impl SVMMessage, feature_set: &FeatureSet) -> u64 { - let num_write_locks = - if feature_set.is_active(&feature_set::cost_model_requested_write_lock_cost::id()) { - transaction.num_write_locks() - } else { - Self::get_writable_accounts(transaction).count() as u64 - }; + fn get_write_lock_cost(num_write_locks: u64) -> u64 { WRITE_LOCK_UNITS.saturating_mul(num_write_locks) } /// Return (programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost) - fn get_transaction_cost( - transaction: &impl TransactionWithMeta, + fn get_transaction_cost<'a>( + meta: &impl StaticMeta, + instructions: impl Iterator)>, feature_set: &FeatureSet, ) -> (u64, u64, u64) { if feature_set.is_active(&feature_set::reserve_minimal_cus_for_builtin_instructions::id()) { - let data_bytes_cost = Self::get_instructions_data_cost(transaction); + let data_bytes_cost = Self::get_instructions_data_cost(instructions); let (programs_execution_cost, loaded_accounts_data_size_cost) = - Self::get_estimated_execution_cost(transaction, feature_set); + Self::get_estimated_execution_cost(meta, feature_set); ( programs_execution_cost, loaded_accounts_data_size_cost, data_bytes_cost, ) } else { - Self::get_transaction_cost_without_minimal_builtin_cus(transaction, feature_set) + Self::get_transaction_cost_without_minimal_builtin_cus(meta, instructions, feature_set) } } - fn get_transaction_cost_without_minimal_builtin_cus( - transaction: &impl TransactionWithMeta, + fn get_transaction_cost_without_minimal_builtin_cus<'a>( + meta: &impl StaticMeta, + instructions: impl Iterator)>, feature_set: &FeatureSet, ) -> (u64, u64, u64) { let mut programs_execution_costs = 0u64; @@ -179,7 +243,7 @@ impl CostModel { let mut compute_unit_limit_is_set = false; let mut has_user_space_instructions = false; - for (program_id, instruction) in transaction.program_instructions_iter() { + for (program_id, instruction) in instructions { let ix_execution_cost = if let Some(builtin_cost) = get_builtin_instruction_cost(program_id, feature_set) { builtin_cost @@ -207,7 +271,7 @@ impl CostModel { // if failed to process compute budget instructions, the transaction // will not be executed by `bank`, therefore it should be considered // as no execution cost by cost model. - match transaction + match meta .compute_budget_instruction_details() .sanitize_and_convert_to_compute_budget_limits(feature_set) { @@ -244,7 +308,7 @@ impl CostModel { /// Return (programs_execution_cost, loaded_accounts_data_size_cost) fn get_estimated_execution_cost( - transaction: &impl TransactionWithMeta, + transaction: &impl StaticMeta, feature_set: &FeatureSet, ) -> (u64, u64) { // if failed to process compute_budget instructions, the transaction will not be executed @@ -267,10 +331,11 @@ impl CostModel { } /// Return the instruction data bytes cost. 
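The write-lock half of the refactor, just above, separates counting locks from pricing them, with the count depending on a single feature gate. A hedged sketch with illustrative names follows; `WRITE_LOCK_UNITS` is assumed to be `COMPUTE_UNIT_TO_US_RATIO * 10`, i.e. 300 with a ratio of 30.

```rust
// Illustrative: requested vs. actually-granted write locks.
const WRITE_LOCK_UNITS: u64 = 300; // assumed COMPUTE_UNIT_TO_US_RATIO * 10

fn num_write_locks(
    requested_write_locks: u64, // every account the transaction asked to write-lock
    granted_write_locks: u64,   // only accounts still writable after demotions
    requested_write_lock_cost_active: bool,
) -> u64 {
    if requested_write_lock_cost_active {
        requested_write_locks
    } else {
        granted_write_locks
    }
}

fn write_lock_cost(num_write_locks: u64) -> u64 {
    WRITE_LOCK_UNITS.saturating_mul(num_write_locks)
}
```

Charging requested locks means a transaction cannot lower its cost by asking for write access to accounts that end up demoted to read-only.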
- fn get_instructions_data_cost(transaction: &impl SVMMessage) -> u64 { - let ix_data_bytes_len_total: u64 = transaction - .instructions_iter() - .map(|instruction| instruction.data.len() as u64) + fn get_instructions_data_cost<'a>( + instructions: impl Iterator)>, + ) -> u64 { + let ix_data_bytes_len_total: u64 = instructions + .map(|(_, instruction)| instruction.data.len() as u64) .sum(); ix_data_bytes_len_total / INSTRUCTION_DATA_BYTES_COST @@ -318,9 +383,11 @@ impl CostModel { /// eventually, potentially determine account data size of all writable accounts /// at the moment, calculate account data size of account creation - fn calculate_allocated_accounts_data_size(transaction: &impl SVMMessage) -> u64 { + fn calculate_allocated_accounts_data_size<'a>( + instructions: impl Iterator)>, + ) -> u64 { let mut tx_attempted_allocation_size: u64 = 0; - for (program_id, instruction) in transaction.program_instructions_iter() { + for (program_id, instruction) in instructions { match Self::calculate_account_data_size_on_instruction(program_id, instruction) { SystemProgramAccountAllocation::Failed => { // If any system program instructions can be statically @@ -393,7 +460,9 @@ mod tests { let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); assert_eq!( - CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + CostModel::calculate_allocated_accounts_data_size( + sanitized_tx.program_instructions_iter() + ), 0 ); } @@ -418,7 +487,9 @@ mod tests { let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); assert_eq!( - CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + CostModel::calculate_allocated_accounts_data_size( + sanitized_tx.program_instructions_iter() + ), space1 + space2 ); } @@ -459,7 +530,9 @@ mod tests { let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); assert_eq!( - CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + CostModel::calculate_allocated_accounts_data_size( + sanitized_tx.program_instructions_iter() + ), MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION as u64, ); } @@ -483,7 +556,9 @@ mod tests { assert_eq!( 0, // SystemProgramAccountAllocation::Failed, - CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + CostModel::calculate_allocated_accounts_data_size( + sanitized_tx.program_instructions_iter() + ), ); } @@ -500,7 +575,9 @@ mod tests { assert_eq!( 0, // SystemProgramAccountAllocation::Failed, - CostModel::calculate_allocated_accounts_data_size(&sanitized_tx), + CostModel::calculate_allocated_accounts_data_size( + sanitized_tx.program_instructions_iter() + ), ); } @@ -571,7 +648,11 @@ mod tests { ), ] { let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&simple_transaction, &feature_set); + CostModel::get_transaction_cost( + &simple_transaction, + simple_transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(expected_execution_cost, program_execution_cost); } @@ -605,7 +686,11 @@ mod tests { ), ] { let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); + CostModel::get_transaction_cost( + &token_transaction, + token_transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(expected_execution_cost, program_execution_cost); assert_eq!(0, data_bytes_cost); @@ -671,7 +756,11 @@ mod tests { (FeatureSet::all_enabled(), expected_cu_limit as u64), 
] { let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); + CostModel::get_transaction_cost( + &token_transaction, + token_transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(expected_execution_cost, program_execution_cost); assert_eq!(1, data_bytes_cost); @@ -714,7 +803,11 @@ mod tests { for feature_set in [FeatureSet::default(), FeatureSet::all_enabled()] { let (program_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&token_transaction, &feature_set); + CostModel::get_transaction_cost( + &token_transaction, + token_transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(0, program_execution_cost); } } @@ -746,7 +839,7 @@ mod tests { ), ] { let (programs_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&tx, &feature_set); + CostModel::get_transaction_cost(&tx, tx.program_instructions_iter(), &feature_set); assert_eq!(expected_execution_cost, programs_execution_cost); assert_eq!(6, data_bytes_cost); } @@ -786,7 +879,7 @@ mod tests { ), ] { let (program_execution_cost, _loaded_accounts_data_size_cost, data_bytes_cost) = - CostModel::get_transaction_cost(&tx, &feature_set); + CostModel::get_transaction_cost(&tx, tx.program_instructions_iter(), &feature_set); assert_eq!(expected_cost, program_execution_cost); assert_eq!(0, data_bytes_cost); } @@ -932,7 +1025,11 @@ mod tests { ), ] { let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&transaction, &feature_set); + CostModel::get_transaction_cost( + &transaction, + transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(expected_execution_cost, programs_execution_cost); } @@ -962,7 +1059,11 @@ mod tests { (FeatureSet::all_enabled(), cu_limit as u64), ] { let (programs_execution_cost, _loaded_accounts_data_size_cost, _data_bytes_cost) = - CostModel::get_transaction_cost(&transaction, &feature_set); + CostModel::get_transaction_cost( + &transaction, + transaction.program_instructions_iter(), + &feature_set, + ); assert_eq!(expected_execution_cost, programs_execution_cost); } diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 3ca2c84d048b98..b0ea2d396e3d58 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -76,6 +76,7 @@ pub struct CostTracker { /// the tracker, but are still waiting for an update with actual usage or /// removal if the transaction does not end up getting committed. in_flight_transaction_count: usize, + secp256r1_instruction_signature_count: u64, } impl Default for CostTracker { @@ -102,6 +103,7 @@ impl Default for CostTracker { secp256k1_instruction_signature_count: 0, ed25519_instruction_signature_count: 0, in_flight_transaction_count: 0, + secp256r1_instruction_signature_count: 0, } } } @@ -129,6 +131,11 @@ impl CostTracker { self.in_flight_transaction_count = 0; } + /// Get the overall block limit. 
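The tuple ordering of `simd_0207_block_limits()` earlier in this diff is meant to line up with `CostTracker::set_limits`, so a feature-activation path can pass the tuple straight through. A sketch of what such a call site might look like, assuming `set_limits` takes `(account_limit, block_limit, vote_limit)` in that order and assuming the `solana_cost_model` crate paths; `apply_simd_0207_limits` is a hypothetical helper, not part of this change:

```rust
use solana_cost_model::{
    block_cost_limits::simd_0207_block_limits, cost_tracker::CostTracker,
};

// Hypothetical activation hook: raise the block limit to 50M CUs while
// leaving the per-account (12M) and vote (36M) limits unchanged.
fn apply_simd_0207_limits(cost_tracker: &mut CostTracker) {
    let (account_limit, block_limit, vote_limit) = simd_0207_block_limits();
    cost_tracker.set_limits(account_limit, block_limit, vote_limit);
}
```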
+ pub fn get_block_limit(&self) -> u64 { + self.block_cost_limit + } + /// allows to adjust limits initiated during construction pub fn set_limits( &mut self, @@ -256,6 +263,11 @@ impl CostTracker { self.in_flight_transaction_count, i64 ), + ( + "secp256r1_instruction_signature_count", + self.secp256r1_instruction_signature_count, + i64 + ) ); } @@ -334,6 +346,10 @@ impl CostTracker { self.ed25519_instruction_signature_count, tx_cost.num_ed25519_instruction_signatures() ); + saturating_add_assign!( + self.secp256r1_instruction_signature_count, + tx_cost.num_secp256r1_instruction_signatures() + ); self.add_transaction_execution_cost(tx_cost, tx_cost.sum()) } @@ -353,6 +369,9 @@ impl CostTracker { self.ed25519_instruction_signature_count = self .ed25519_instruction_signature_count .saturating_sub(tx_cost.num_ed25519_instruction_signatures()); + self.secp256r1_instruction_signature_count = self + .secp256r1_instruction_signature_count + .saturating_sub(tx_cost.num_secp256r1_instruction_signatures()); } /// Apply additional actual execution units to cost_tracker diff --git a/cost-model/src/transaction_cost.rs b/cost-model/src/transaction_cost.rs index b772e0b0d7b3ad..5042871f30831f 100644 --- a/cost-model/src/transaction_cost.rs +++ b/cost-model/src/transaction_cost.rs @@ -1,9 +1,8 @@ #[cfg(feature = "dev-context-only-utils")] use solana_compute_budget_instruction::compute_budget_instruction_details::ComputeBudgetInstructionDetails; use { - crate::block_cost_limits, - solana_runtime_transaction::transaction_with_meta::TransactionWithMeta, - solana_sdk::pubkey::Pubkey, + crate::block_cost_limits, solana_runtime_transaction::transaction_meta::StaticMeta, + solana_sdk::pubkey::Pubkey, solana_svm_transaction::svm_message::SVMMessage, }; /// TransactionCost is used to represent resources required to process @@ -17,12 +16,12 @@ use { const SIMPLE_VOTE_USAGE_COST: u64 = 3428; #[derive(Debug)] -pub enum TransactionCost<'a, Tx: TransactionWithMeta> { +pub enum TransactionCost<'a, Tx> { SimpleVote { transaction: &'a Tx }, Transaction(UsageCostDetails<'a, Tx>), } -impl<'a, Tx: TransactionWithMeta> TransactionCost<'a, Tx> { +impl<'a, Tx> TransactionCost<'a, Tx> { pub fn sum(&self) -> u64 { #![allow(clippy::assertions_on_constants)] match self { @@ -90,7 +89,9 @@ impl<'a, Tx: TransactionWithMeta> TransactionCost<'a, Tx> { Self::Transaction(usage_cost) => usage_cost.write_lock_cost, } } +} +impl TransactionCost<'_, Tx> { pub fn writable_accounts(&self) -> impl Iterator { let transaction = match self { Self::SimpleVote { transaction } => transaction, @@ -102,7 +103,9 @@ impl<'a, Tx: TransactionWithMeta> TransactionCost<'a, Tx> { .enumerate() .filter_map(|(index, key)| transaction.is_writable(index).then_some(key)) } +} +impl TransactionCost<'_, Tx> { pub fn num_transaction_signatures(&self) -> u64 { match self { Self::SimpleVote { .. } => 1, @@ -132,11 +135,21 @@ impl<'a, Tx: TransactionWithMeta> TransactionCost<'a, Tx> { .num_ed25519_instruction_signatures(), } } + + pub fn num_secp256r1_instruction_signatures(&self) -> u64 { + match self { + Self::SimpleVote { .. 
} => 0, + Self::Transaction(usage_cost) => usage_cost + .transaction + .signature_details() + .num_secp256r1_instruction_signatures(), + } + } } // costs are stored in number of 'compute unit's #[derive(Debug)] -pub struct UsageCostDetails<'a, Tx: TransactionWithMeta> { +pub struct UsageCostDetails<'a, Tx> { pub transaction: &'a Tx, pub signature_cost: u64, pub write_lock_cost: u64, @@ -146,7 +159,7 @@ pub struct UsageCostDetails<'a, Tx: TransactionWithMeta> { pub allocated_accounts_data_size: u64, } -impl<'a, Tx: TransactionWithMeta> UsageCostDetails<'a, Tx> { +impl<'a, Tx> UsageCostDetails<'a, Tx> { pub fn sum(&self) -> u64 { self.signature_cost .saturating_add(self.write_lock_cost) @@ -257,7 +270,9 @@ impl solana_runtime_transaction::transaction_meta::StaticMeta for WritableKeysTr } #[cfg(feature = "dev-context-only-utils")] -impl TransactionWithMeta for WritableKeysTransaction { +impl solana_runtime_transaction::transaction_with_meta::TransactionWithMeta + for WritableKeysTransaction +{ #[allow(refining_impl_trait)] fn as_sanitized_transaction( &self, diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 3ccc948ef09bc4..767c3b18f6bb3e 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -4,7 +4,7 @@ module.exports = { title: "Agave", tagline: "Agave is an open source project implementing a new, high-performance, permissionless blockchain.", - url: "https://docs.solanalabs.com", + url: "https://docs.anza.xyz", baseUrl: "/", favicon: "img/favicon.ico", organizationName: "anza-xyz", // Usually your GitHub org/user name. @@ -89,7 +89,7 @@ module.exports = { href: "https://solana.com/discord", // label: "Discord", className: "header-link-icon header-discord-link", - "aria-label": "Solana Discord", + "aria-label": "Solana Tech Discord", position: "right", }, { diff --git a/docs/src/operations/best-practices/security.md b/docs/src/operations/best-practices/security.md index 861f0ecdfe9214..c0c87051d63993 100644 --- a/docs/src/operations/best-practices/security.md +++ b/docs/src/operations/best-practices/security.md @@ -35,7 +35,7 @@ Your system should close all ports that do not need to be open to the outside wo ## Eliminate Brute Force Attacks With fail2ban -[fail2ban](https://github.com/fail2ban/fail2ban) is a network security tool that checks your logs for suspicious login attempts and bans those IP addresses after repeated attempts. This will help mitigate brute force attacks on your serve. +[fail2ban](https://github.com/fail2ban/fail2ban) is a network security tool that checks your logs for suspicious login attempts and bans those IP addresses after repeated attempts. This will help mitigate brute force attacks on your server. 
The default setup should work out of the box by simply installing `fail2ban`:
diff --git a/dos/src/main.rs b/dos/src/main.rs
index 62af3c83af381c..8651d3726c479f 100644
--- a/dos/src/main.rs
+++ b/dos/src/main.rs
@@ -970,9 +970,13 @@ pub mod test {
         let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
         let nodes_slice = [node];

-        let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| {
-            panic!("Could not create TpuClient with Quic Cache {err:?}");
-        }));
+        let client = Arc::new(
+            cluster
+                .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
+                .unwrap_or_else(|err| {
+                    panic!("Could not create TpuClient with Quic Cache {err:?}");
+                }),
+        );

         // creates one transaction with 8 valid signatures and sends it 10 times
         run_dos(
@@ -1100,9 +1104,13 @@ pub mod test {
         let node = cluster.get_contact_info(&nodes[0]).unwrap().clone();
         let nodes_slice = [node];

-        let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| {
-            panic!("Could not create TpuClient with Quic Cache {err:?}");
-        }));
+        let client = Arc::new(
+            cluster
+                .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
+                .unwrap_or_else(|err| {
+                    panic!("Could not create TpuClient with Quic Cache {err:?}");
+                }),
+        );

         // creates one transaction and sends it 10 times
         // this is done in single thread
diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs
index 0cfff715a7d82b..c79513c0a1adf5 100644
--- a/geyser-plugin-interface/src/geyser_plugin_interface.rs
+++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs
@@ -457,6 +457,14 @@ pub trait GeyserPlugin: Any + Send + Sync + std::fmt::Debug {
         true
     }

+    /// Check if the plugin is interested in account data from snapshots.
+    /// Default is true -- if the plugin is not interested in account data
+    /// from snapshots, please return false, as this can significantly
+    /// improve startup time.
+    fn account_data_snapshot_notifications_enabled(&self) -> bool {
+        true
+    }
+
     /// Check if the plugin is interested in transaction data
     /// Default is false -- if the plugin is interested in
     /// transaction data, please return true.
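From a plugin author's perspective, the new hook is a one-method opt-out. A minimal sketch, assuming the `agave-geyser-plugin-interface` crate layout; `MyPlugin` is illustrative:

```rust
use agave_geyser_plugin_interface::geyser_plugin_interface::GeyserPlugin;

#[derive(Debug)]
struct MyPlugin;

impl GeyserPlugin for MyPlugin {
    fn name(&self) -> &'static str {
        "my_plugin"
    }

    // Keep receiving live account updates as before...
    fn account_data_notifications_enabled(&self) -> bool {
        true
    }

    // ...but skip the flood of per-account notifications generated while
    // the validator loads its startup snapshot.
    fn account_data_snapshot_notifications_enabled(&self) -> bool {
        false
    }
}
```

As the manager changes below show, snapshot notifications stay enabled if any loaded plugin wants them, so a single plugin returning true preserves the old behavior for the whole validator.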
diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index 60df441a7e3cef..98e43987df5fea 100644 --- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -22,9 +22,14 @@ use { #[derive(Debug)] pub(crate) struct AccountsUpdateNotifierImpl { plugin_manager: Arc>, + snapshot_notifications_enabled: bool, } impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl { + fn snapshot_notifications_enabled(&self) -> bool { + self.snapshot_notifications_enabled + } + fn notify_account_update( &self, slot: Slot, @@ -97,8 +102,14 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl { } impl AccountsUpdateNotifierImpl { - pub fn new(plugin_manager: Arc>) -> Self { - AccountsUpdateNotifierImpl { plugin_manager } + pub fn new( + plugin_manager: Arc>, + snapshot_notifications_enabled: bool, + ) -> Self { + AccountsUpdateNotifierImpl { + plugin_manager, + snapshot_notifications_enabled, + } } fn accountinfo_from_shared_account_data<'a>( diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index beaa799109b5d7..1e4fb7dbba0aef 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -85,6 +85,16 @@ impl GeyserPluginManager { false } + /// Check if there is any plugin interested in account data from snapshot + pub fn account_data_snapshot_notifications_enabled(&self) -> bool { + for plugin in &self.plugins { + if plugin.account_data_snapshot_notifications_enabled() { + return true; + } + } + false + } + /// Check if there is any plugin interested in transaction data pub fn transaction_notifications_enabled(&self) -> bool { for plugin in &self.plugins { diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index f624fc66c90c3e..ddb81ceb8098a1 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -90,6 +90,8 @@ impl GeyserPluginService { let account_data_notifications_enabled = plugin_manager.account_data_notifications_enabled() || geyser_plugin_always_enabled; + let account_data_snapshot_notifications_enabled = + plugin_manager.account_data_snapshot_notifications_enabled(); let transaction_notifications_enabled = plugin_manager.transaction_notifications_enabled() || geyser_plugin_always_enabled; let entry_notifications_enabled = @@ -98,8 +100,10 @@ impl GeyserPluginService { let accounts_update_notifier: Option = if account_data_notifications_enabled { - let accounts_update_notifier = - AccountsUpdateNotifierImpl::new(plugin_manager.clone()); + let accounts_update_notifier = AccountsUpdateNotifierImpl::new( + plugin_manager.clone(), + account_data_snapshot_notifications_enabled, + ); Some(Arc::new(accounts_update_notifier)) } else { None diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 126d0bbe7efc4d..fc2ee43522dd5c 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -25,8 +25,10 @@ rand = { workspace = true } rand_chacha = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +serde-big-array = { workspace = true } serde_bytes = { workspace = true } serde_derive = { workspace = true } +siphasher = { workspace = true } solana-bloom = { workspace = true } solana-clap-utils = { workspace = true } solana-client = { workspace = true } @@ -94,6 +96,9 @@ name 
= "crds_gossip_pull" [[bench]] name = "crds_shards" +[[bench]] +name = "weighted_shuffle" + [[bin]] name = "solana-gossip" path = "src/main.rs" diff --git a/gossip/benches/weighted_shuffle.rs b/gossip/benches/weighted_shuffle.rs index 09615c57bbca15..7744c2f938b1eb 100644 --- a/gossip/benches/weighted_shuffle.rs +++ b/gossip/benches/weighted_shuffle.rs @@ -25,6 +25,24 @@ fn bench_weighted_shuffle_new(bencher: &mut Bencher) { #[bench] fn bench_weighted_shuffle_shuffle(bencher: &mut Bencher) { + let mut seed = [0u8; 32]; + let mut rng = rand::thread_rng(); + let weights = make_weights(&mut rng); + let weighted_shuffle = WeightedShuffle::new("", &weights); + bencher.iter(|| { + rng.fill(&mut seed[..]); + let mut rng = ChaChaRng::from_seed(seed); + weighted_shuffle + .clone() + .shuffle(&mut rng) + .for_each(|index| { + std::hint::black_box(index); + }); + }); +} + +#[bench] +fn bench_weighted_shuffle_collect(bencher: &mut Bencher) { let mut seed = [0u8; 32]; let mut rng = rand::thread_rng(); let weights = make_weights(&mut rng); diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index a1ec2f21915d90..4dad5e083aba42 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -35,11 +35,11 @@ use { epoch_slots::EpochSlots, gossip_error::GossipError, legacy_contact_info::LegacyContactInfo, - ping_pong::{PingCache, Pong}, + ping_pong::Pong, protocol::{ - split_gossip_messages, Ping, Protocol, PruneData, DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, - MAX_INCREMENTAL_SNAPSHOT_HASHES, MAX_PRUNE_DATA_NODES, - PULL_RESPONSE_MIN_SERIALIZED_SIZE, PUSH_MESSAGE_MAX_PAYLOAD_SIZE, + split_gossip_messages, Ping, PingCache, Protocol, PruneData, + DUPLICATE_SHRED_MAX_PAYLOAD_SIZE, MAX_INCREMENTAL_SNAPSHOT_HASHES, + MAX_PRUNE_DATA_NODES, PULL_RESPONSE_MIN_SERIALIZED_SIZE, PUSH_MESSAGE_MAX_PAYLOAD_SIZE, }, restart_crds_values::{ RestartHeaviestFork, RestartLastVotedForkSlots, RestartLastVotedForkSlotsError, @@ -87,7 +87,7 @@ use { collections::{HashMap, HashSet, VecDeque}, fmt::Debug, fs::{self, File}, - io::BufReader, + io::{BufReader, BufWriter, Write}, iter::repeat, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket}, num::NonZeroUsize, @@ -217,6 +217,8 @@ impl ClusterInfo { outbound_budget: DataBudget::default(), my_contact_info: RwLock::new(contact_info), ping_cache: Mutex::new(PingCache::new( + &mut rand::thread_rng(), + Instant::now(), GOSSIP_PING_CACHE_TTL, GOSSIP_PING_CACHE_RATE_LIMIT_DELAY, GOSSIP_PING_CACHE_CAPACITY, @@ -306,6 +308,7 @@ impl ClusterInfo { } pub fn save_contact_info(&self) { + let _st = ScopedTimer::from(&self.stats.save_contact_info_time); let nodes = { let entrypoint_gossip_addrs = self .entrypoints @@ -347,8 +350,9 @@ impl ClusterInfo { let tmp_filename = &filename.with_extension("tmp"); match File::create(tmp_filename) { - Ok(mut file) => { - if let Err(err) = bincode::serialize_into(&mut file, &nodes) { + Ok(file) => { + let mut writer = BufWriter::new(file); + if let Err(err) = bincode::serialize_into(&mut writer, &nodes) { warn!( "Failed to serialize contact info info {}: {}", tmp_filename.display(), @@ -356,6 +360,9 @@ impl ClusterInfo { ); return; } + if let Err(err) = writer.flush() { + warn!("Failed to save contact info: {err}"); + } } Err(err) => { warn!("Failed to create {}: {}", tmp_filename.display(), err); @@ -1729,24 +1736,19 @@ impl ClusterInfo { // Returns a predicate checking if the pull request is from a valid // address, and if the address have responded to a ping request. 
Also // appends ping packets for the addresses which need to be (re)verified. - // - // allow lint false positive trait bound requirement (`CryptoRng` only - // implemented on `&'a mut T` - #[allow(clippy::needless_pass_by_ref_mut)] fn check_pull_request<'a, R>( &'a self, now: Instant, - mut rng: &'a mut R, + rng: &'a mut R, packet_batch: &'a mut PacketBatch, ) -> impl FnMut(&PullData) -> bool + 'a where R: Rng + CryptoRng, { let mut cache = HashMap::<(Pubkey, SocketAddr), bool>::new(); - let mut pingf = move || Ping::new_rand(&mut rng, &self.keypair()).ok(); let mut ping_cache = self.ping_cache.lock().unwrap(); let mut hard_check = move |node| { - let (check, ping) = ping_cache.check(now, node, &mut pingf); + let (check, ping) = ping_cache.check(rng, &self.keypair(), now, node); if let Some(ping) = ping { let ping = Protocol::PingMessage(ping); match Packet::from_data(Some(&node.1), ping) { @@ -1964,10 +1966,9 @@ impl ClusterInfo { let keypair = self.keypair(); let pongs_and_dests: Vec<_> = pings .into_iter() - .filter_map(|(addr, ping)| { - let pong = Pong::new(&ping, &keypair).ok()?; - let pong = Protocol::PongMessage(pong); - Some((addr, pong)) + .map(|(addr, ping)| { + let pong = Pong::new(&ping, &keypair); + (addr, Protocol::PongMessage(pong)) }) .collect(); if pongs_and_dests.is_empty() { @@ -3110,9 +3111,8 @@ fn verify_gossip_addr( }; let (out, ping) = { let node = (*pubkey, addr); - let mut pingf = move || Ping::new_rand(rng, keypair).ok(); let mut ping_cache = ping_cache.lock().unwrap(); - ping_cache.check(Instant::now(), node, &mut pingf) + ping_cache.check(rng, keypair, Instant::now(), node) }; if let Some(ping) = ping { pings.push((addr, Protocol::PingMessage(ping))); @@ -3209,12 +3209,11 @@ mod tests { .collect(); let pings: Vec<_> = { let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); - let mut pingf = || Ping::new_rand(&mut rng, &this_node).ok(); remote_nodes .iter() .map(|(keypair, socket)| { let node = (keypair.pubkey(), *socket); - let (check, ping) = ping_cache.check(now, node, &mut pingf); + let (check, ping) = ping_cache.check(&mut rng, &this_node, now, node); // Assert that initially remote nodes will not pass the // ping/pong check. 
assert!(!check); @@ -3225,7 +3224,7 @@ mod tests { let pongs: Vec<(SocketAddr, Pong)> = pings .iter() .zip(&remote_nodes) - .map(|(ping, (keypair, socket))| (*socket, Pong::new(ping, keypair).unwrap())) + .map(|(ping, (keypair, socket))| (*socket, Pong::new(ping, keypair))) .collect(); let now = now + Duration::from_millis(1); cluster_info.handle_batch_pong_messages(pongs, now); @@ -3234,7 +3233,7 @@ mod tests { let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let (check, _) = ping_cache.check(now, node, || -> Option { None }); + let (check, _) = ping_cache.check(&mut rng, &this_node, now, node); assert!(check); } } @@ -3243,7 +3242,7 @@ mod tests { let mut ping_cache = cluster_info.ping_cache.lock().unwrap(); let (keypair, socket) = new_rand_remote_node(&mut rng); let node = (keypair.pubkey(), socket); - let (check, _) = ping_cache.check(now, node, || -> Option { None }); + let (check, _) = ping_cache.check(&mut rng, &this_node, now, node); assert!(!check); } } @@ -3263,11 +3262,11 @@ mod tests { .collect(); let pings: Vec<_> = remote_nodes .iter() - .map(|(keypair, _)| Ping::new_rand(&mut rng, keypair).unwrap()) + .map(|(keypair, _)| Ping::new(rng.gen(), keypair)) .collect(); let pongs: Vec<_> = pings .iter() - .map(|ping| Pong::new(ping, &this_node).unwrap()) + .map(|ping| Pong::new(ping, &this_node)) .collect(); let recycler = PacketBatchRecycler::default(); let packets = cluster_info diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs index 9be73f19759979..b6faced039e1ea 100644 --- a/gossip/src/cluster_info_metrics.rs +++ b/gossip/src/cluster_info_metrics.rs @@ -171,6 +171,7 @@ pub struct GossipStats { pub(crate) push_vote_read: Counter, pub(crate) repair_peers: Counter, pub(crate) require_stake_for_gossip_unknown_stakes: Counter, + pub(crate) save_contact_info_time: Counter, pub(crate) skip_pull_response_shred_version: Counter, pub(crate) skip_pull_shred_version: Counter, pub(crate) skip_push_message_shred_version: Counter, @@ -318,6 +319,11 @@ pub(crate) fn submit_gossip_stats( stats.push_response_count.clear(), i64 ), + ( + "save_contact_info_time", + stats.save_contact_info_time.clear(), + i64 + ), ); datapoint_info!( "cluster_info_stats3", diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 40ecb65771184d..36795f6720d04d 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -15,8 +15,7 @@ use { crds_gossip_push::CrdsGossipPush, crds_value::CrdsValue, duplicate_shred::{self, DuplicateShredIndex, MAX_DUPLICATE_SHREDS}, - ping_pong::PingCache, - protocol::Ping, + protocol::{Ping, PingCache}, }, itertools::Itertools, rand::{CryptoRng, Rng}, @@ -386,7 +385,6 @@ pub(crate) fn maybe_ping_gossip_addresses( pings: &mut Vec<(SocketAddr, Ping)>, ) -> Vec { let mut ping_cache = ping_cache.lock().unwrap(); - let mut pingf = move || Ping::new_rand(rng, keypair).ok(); let now = Instant::now(); nodes .into_iter() @@ -396,7 +394,7 @@ pub(crate) fn maybe_ping_gossip_addresses( }; let (check, ping) = { let node = (*node.pubkey(), node_gossip); - ping_cache.check(now, node, &mut pingf) + ping_cache.check(rng, keypair, now, node) }; if let Some(ping) = ping { pings.push((node_gossip, ping)); @@ -431,6 +429,8 @@ mod test { ) .unwrap(); let ping_cache = PingCache::new( + &mut rand::thread_rng(), + Instant::now(), Duration::from_secs(20 * 60), // ttl Duration::from_secs(20 * 60) / 64, // rate_limit_delay 128, // capacity diff --git 
a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index f650d91497fbb5..1f6ad65b432c8f 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -19,8 +19,7 @@ use { crds_gossip, crds_gossip_error::CrdsGossipError, crds_value::CrdsValue, - ping_pong::PingCache, - protocol::Ping, + protocol::{Ping, PingCache}, }, itertools::Itertools, rand::{ @@ -679,6 +678,16 @@ pub(crate) mod tests { #[cfg(not(debug_assertions))] pub(crate) const MIN_NUM_BLOOM_FILTERS: usize = 64; + fn new_ping_cache() -> PingCache { + PingCache::new( + &mut rand::thread_rng(), + Instant::now(), + Duration::from_secs(20 * 60), // ttl + Duration::from_secs(20 * 60) / 64, // rate_limit_delay + 128, // capacity + ) + } + #[test] fn test_hash_as_u64() { let arr: [u8; HASH_BYTES] = std::array::from_fn(|i| i as u8 + 1); @@ -851,11 +860,7 @@ pub(crate) mod tests { ))); let node = CrdsGossipPull::default(); let mut pings = Vec::new(); - let ping_cache = Mutex::new(PingCache::new( - Duration::from_secs(20 * 60), // ttl - Duration::from_secs(20 * 60) / 64, // rate_limit_delay - 128, // capacity - )); + let ping_cache = Mutex::new(new_ping_cache()); assert_eq!( node.new_pull_request( &thread_pool, @@ -949,11 +954,7 @@ pub(crate) mod tests { fn test_new_mark_creation_time() { let now: u64 = 1_605_127_770_789; let thread_pool = ThreadPoolBuilder::new().build().unwrap(); - let mut ping_cache = PingCache::new( - Duration::from_secs(20 * 60), // ttl - Duration::from_secs(20 * 60) / 64, // rate_limit_delay - 128, // capacity - ); + let mut ping_cache = new_ping_cache(); let mut crds = Crds::default(); let node_keypair = Keypair::new(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( @@ -1011,11 +1012,7 @@ pub(crate) mod tests { let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let node_keypair = Keypair::new(); let mut node_crds = Crds::default(); - let mut ping_cache = PingCache::new( - Duration::from_secs(20 * 60), // ttl - Duration::from_secs(20 * 60) / 64, // rate_limit_delay - 128, // capacity - ); + let mut ping_cache = new_ping_cache(); let now = timestamp(); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( &node_keypair.pubkey(), @@ -1125,11 +1122,7 @@ pub(crate) mod tests { node_crds .insert(entry, 0, GossipRoute::LocalMessage) .unwrap(); - let mut ping_cache = PingCache::new( - Duration::from_secs(20 * 60), // ttl - Duration::from_secs(20 * 60) / 64, // rate_limit_delay - 128, // capacity - ); + let mut ping_cache = new_ping_cache(); let new = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 1); ping_cache.mock_pong(*new.pubkey(), new.gossip().unwrap(), Instant::now()); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(new)); diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 4dd0f4e06cc4c1..43c500cbdcfd4d 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -17,8 +17,7 @@ use { crds::{Crds, CrdsError, Cursor, GossipRoute}, crds_gossip, crds_value::CrdsValue, - ping_pong::PingCache, - protocol::Ping, + protocol::{Ping, PingCache}, push_active_set::PushActiveSet, received_cache::ReceivedCache, }, @@ -287,6 +286,8 @@ mod tests { fn new_ping_cache() -> PingCache { PingCache::new( + &mut rand::thread_rng(), + Instant::now(), Duration::from_secs(20 * 60), // ttl Duration::from_secs(20 * 60) / 64, // rate_limit_delay 128, // capacity diff --git a/gossip/src/deprecated.rs b/gossip/src/deprecated.rs index 
73a2f37d2a0c0a..0dd34247cf9417 100644 --- a/gossip/src/deprecated.rs +++ b/gossip/src/deprecated.rs @@ -19,5 +19,6 @@ impl Default for CompressionType { pub(crate) struct EpochIncompleteSlots { first: Slot, compression: CompressionType, + #[serde(with = "serde_bytes")] compressed_list: Vec, } diff --git a/gossip/src/epoch_slots.rs b/gossip/src/epoch_slots.rs index 9ed1f8fa306433..7c76b7cefb3b26 100644 --- a/gossip/src/epoch_slots.rs +++ b/gossip/src/epoch_slots.rs @@ -45,6 +45,7 @@ impl Sanitize for Uncompressed { pub struct Flate2 { pub first_slot: Slot, pub num: usize, + #[serde(with = "serde_bytes")] pub compressed: Vec, } diff --git a/gossip/src/ping_pong.rs b/gossip/src/ping_pong.rs index 56b75862927e80..7fef7c93918d81 100644 --- a/gossip/src/ping_pong.rs +++ b/gossip/src/ping_pong.rs @@ -1,28 +1,34 @@ use { - bincode::{serialize, Error}, lru::LruCache, - rand::{CryptoRng, Fill, Rng}, - serde::Serialize, + rand::{CryptoRng, Rng}, + serde_big_array::BigArray, + siphasher::sip::SipHasher24, solana_sanitize::{Sanitize, SanitizeError}, solana_sdk::{ - hash::{self, Hash}, + hash::Hash, pubkey::Pubkey, signature::{Keypair, Signable, Signature, Signer}, }, std::{ borrow::Cow, + hash::{Hash as _, Hasher}, net::SocketAddr, time::{Duration, Instant}, }, }; +const KEY_REFRESH_CADENCE: Duration = Duration::from_secs(60); const PING_PONG_HASH_PREFIX: &[u8] = "SOLANA_PING_PONG".as_bytes(); +// For backward compatibility we are using a const generic parameter here. +// N should always be >= 8 and only the first 8 bytes are used. So the new code +// should only use N == 8. #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Deserialize, Serialize)] -pub struct Ping { +pub struct Ping { from: Pubkey, - token: T, + #[serde(with = "BigArray")] + token: [u8; N], signature: Signature, } @@ -37,48 +43,37 @@ pub struct Pong { /// Maintains records of remote nodes which have returned a valid response to a /// ping message, and on-the-fly ping messages pending a pong response from the /// remote node. -pub struct PingCache { +/// Const generic parameter N corresponds to token size in Ping type. +pub struct PingCache { // Time-to-live of received pong messages. ttl: Duration, // Rate limit delay to generate pings for a given address rate_limit_delay: Duration, + // Hashers initialized with random keys, rotated at KEY_REFRESH_CADENCE. + // Because at the moment that the keys are rotated some pings might already + // be in the flight, we need to keep the two most recent hashers. + hashers: [SipHasher24; 2], + // When hashers were last refreshed. + key_refresh: Instant, // Timestamp of last ping message sent to a remote node. // Used to rate limit pings to remote nodes. pings: LruCache<(Pubkey, SocketAddr), Instant>, // Verified pong responses from remote nodes. pongs: LruCache<(Pubkey, SocketAddr), Instant>, - // Hash of ping tokens sent out to remote nodes, - // pending a pong response back. 
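The `#[serde(with = "serde_bytes")]` attributes added above do not change the bincode wire format of a `Vec<u8>`; they let serde hand the whole buffer to the serializer at once instead of visiting one `u8` at a time, which is purely a (de)serialization speed win. A minimal sketch, with illustrative struct and field names:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct CompressedBlob {
    num: usize,
    // Without the attribute, serde treats Vec<u8> as a generic sequence
    // and serializes each byte through the element path; with it, the
    // buffer round-trips as one contiguous byte string.
    #[serde(with = "serde_bytes")]
    bytes: Vec<u8>,
}
```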
- pending_cache: LruCache, } -impl Ping { - pub fn new(token: T, keypair: &Keypair) -> Result { - let signature = keypair.sign_message(&serialize(&token)?); - let ping = Ping { +impl Ping { + pub fn new(token: [u8; N], keypair: &Keypair) -> Self { + let signature = keypair.sign_message(&token); + Ping { from: keypair.pubkey(), token, signature, - }; - Ok(ping) - } -} - -impl Ping -where - T: Serialize + Fill + Default, -{ - pub fn new_rand(rng: &mut R, keypair: &Keypair) -> Result - where - R: Rng + CryptoRng, - { - let mut token = T::default(); - rng.fill(&mut token); - Ping::new(token, keypair) + } } } -impl Sanitize for Ping { +impl Sanitize for Ping { fn sanitize(&self) -> Result<(), SanitizeError> { self.from.sanitize()?; // TODO Add self.token.sanitize()?; when rust's @@ -87,15 +82,18 @@ impl Sanitize for Ping { } } -impl Signable for Ping { +impl Signable for Ping { + #[inline] fn pubkey(&self) -> Pubkey { self.from } + #[inline] fn signable_data(&self) -> Cow<[u8]> { - Cow::Owned(serialize(&self.token).unwrap()) + Cow::Borrowed(&self.token) } + #[inline] fn get_signature(&self) -> Signature { self.signature } @@ -106,15 +104,13 @@ impl Signable for Ping { } impl Pong { - pub fn new(ping: &Ping, keypair: &Keypair) -> Result { - let token = serialize(&ping.token)?; - let hash = hash::hashv(&[PING_PONG_HASH_PREFIX, &token]); - let pong = Pong { + pub fn new(ping: &Ping, keypair: &Keypair) -> Self { + let hash = hash_ping_token(&ping.token); + Pong { from: keypair.pubkey(), hash, signature: keypair.sign_message(hash.as_ref()), - }; - Ok(pong) + } } pub fn from(&self) -> &Pubkey { @@ -148,16 +144,23 @@ impl Signable for Pong { } } -impl PingCache { - pub fn new(ttl: Duration, rate_limit_delay: Duration, cap: usize) -> Self { +impl PingCache { + pub fn new( + rng: &mut R, + now: Instant, + ttl: Duration, + rate_limit_delay: Duration, + cap: usize, + ) -> Self { // Sanity check ttl/rate_limit_delay assert!(rate_limit_delay <= ttl / 2); Self { ttl, rate_limit_delay, + hashers: std::array::from_fn(|_| SipHasher24::new_with_key(&rng.gen())), + key_refresh: now, pings: LruCache::new(cap), pongs: LruCache::new(cap), - pending_cache: LruCache::new(cap), } } @@ -166,43 +169,37 @@ impl PingCache { /// returns true. /// Note: Does not verify the signature. pub fn add(&mut self, pong: &Pong, socket: SocketAddr, now: Instant) -> bool { - let node = (pong.pubkey(), socket); - match self.pending_cache.peek(&pong.hash) { - Some(value) if *value == node => { - self.pings.pop(&node); - self.pongs.put(node, now); - self.pending_cache.pop(&pong.hash); - true - } - _ => false, - } + let remote_node = (pong.pubkey(), socket); + if !self.hashers.iter().copied().any(|hasher| { + let token = make_ping_token::(hasher, &remote_node); + hash_ping_token(&token) == pong.hash + }) { + return false; + }; + self.pongs.put(remote_node, now); + true } /// Checks if the remote node has been pinged recently. If not, calls the /// given function to generates a new ping message, records current /// timestamp and hash of ping token, and returns the ping message. - fn maybe_ping( + fn maybe_ping( &mut self, + rng: &mut R, + keypair: &Keypair, now: Instant, - node: (Pubkey, SocketAddr), - mut pingf: F, - ) -> Option> - where - T: Serialize, - F: FnMut() -> Option>, - { - match self.pings.peek(&node) { - // Rate limit consecutive pings sent to a remote node. 
- Some(t) if now.saturating_duration_since(*t) < self.rate_limit_delay => None, - _ => { - let ping = pingf()?; - let token = serialize(&ping.token).ok()?; - let hash = hash::hashv(&[PING_PONG_HASH_PREFIX, &token]); - self.pending_cache.put(hash, node); - self.pings.put(node, now); - Some(ping) - } + remote_node: (Pubkey, SocketAddr), + ) -> Option> { + // Rate limit consecutive pings sent to a remote node. + if matches!(self.pings.peek(&remote_node), + Some(&t) if now.saturating_duration_since(t) < self.rate_limit_delay) + { + return None; } + self.pings.put(remote_node, now); + self.maybe_refresh_key(rng, now); + let token = make_ping_token::(self.hashers[0], &remote_node); + Some(Ping::new(token, keypair)) } /// Returns true if the remote node has responded to a ping message. @@ -213,43 +210,63 @@ impl PingCache { /// the ping message. /// Caller should verify if the socket address is valid. (e.g. by using /// ContactInfo::is_valid_address). - pub fn check( + pub fn check( &mut self, + rng: &mut R, + keypair: &Keypair, now: Instant, - node: (Pubkey, SocketAddr), - pingf: F, - ) -> (bool, Option>) - where - T: Serialize, - F: FnMut() -> Option>, - { - let (check, should_ping) = match self.pongs.get(&node) { + remote_node: (Pubkey, SocketAddr), + ) -> (bool, Option>) { + let (check, should_ping) = match self.pongs.get(&remote_node) { None => (false, true), Some(t) => { let age = now.saturating_duration_since(*t); // Pop if the pong message has expired. if age > self.ttl { - self.pongs.pop(&node); + self.pongs.pop(&remote_node); } // If the pong message is not too recent, generate a new ping // message to extend remote node verification. (true, age > self.ttl / 8) } }; - let ping = if should_ping { - self.maybe_ping(now, node, pingf) - } else { - None - }; + let ping = should_ping + .then(|| self.maybe_ping(rng, keypair, now, remote_node)) + .flatten(); (check, ping) } + fn maybe_refresh_key(&mut self, rng: &mut R, now: Instant) { + if now.checked_duration_since(self.key_refresh) > Some(KEY_REFRESH_CADENCE) { + let hasher = SipHasher24::new_with_key(&rng.gen()); + self.hashers[1] = std::mem::replace(&mut self.hashers[0], hasher); + self.key_refresh = now; + } + } + /// Only for tests and simulations. pub fn mock_pong(&mut self, node: Pubkey, socket: SocketAddr, now: Instant) { self.pongs.put((node, socket), now); } } +fn make_ping_token( + mut hasher: SipHasher24, + remote_node: &(Pubkey, SocketAddr), +) -> [u8; N] { + // TODO: Consider including local node's (pubkey, socket-addr). 
+ remote_node.hash(&mut hasher); + let hash = hasher.finish().to_le_bytes(); + debug_assert!(N >= std::mem::size_of::()); + let mut token = [0u8; N]; + token[..std::mem::size_of::()].copy_from_slice(&hash); + token +} + +fn hash_ping_token(token: &[u8; N]) -> Hash { + solana_sdk::hash::hashv(&[PING_PONG_HASH_PREFIX, token]) +} + #[cfg(test)] mod tests { use { @@ -261,21 +278,19 @@ mod tests { }, }; - type Token = [u8; 32]; - #[test] fn test_ping_pong() { let mut rng = rand::thread_rng(); let keypair = Keypair::new(); - let ping = Ping::::new_rand(&mut rng, &keypair).unwrap(); + let ping = Ping::<32>::new(rng.gen(), &keypair); assert!(ping.verify()); assert!(ping.sanitize().is_ok()); - let pong = Pong::new(&ping, &keypair).unwrap(); + let pong = Pong::new(&ping, &keypair); assert!(pong.verify()); assert!(pong.sanitize().is_ok()); assert_eq!( - hash::hashv(&[PING_PONG_HASH_PREFIX, &ping.token]), + solana_sdk::hash::hashv(&[PING_PONG_HASH_PREFIX, &ping.token]), pong.hash ); } @@ -286,7 +301,7 @@ mod tests { let mut rng = rand::thread_rng(); let ttl = Duration::from_millis(256); let delay = ttl / 64; - let mut cache = PingCache::new(ttl, delay, /*cap=*/ 1000); + let mut cache = PingCache::new(&mut rng, Instant::now(), ttl, delay, /*cap=*/ 1000); let this_node = Keypair::new(); let keypairs: Vec<_> = repeat_with(Keypair::new).take(8).collect(); let sockets: Vec<_> = repeat_with(|| { @@ -308,12 +323,11 @@ mod tests { // Initially all checks should fail. The first observation of each node // should create a ping packet. let mut seen_nodes = HashSet::<(Pubkey, SocketAddr)>::new(); - let pings: Vec>> = remote_nodes + let pings: Vec>> = remote_nodes .iter() .map(|(keypair, socket)| { let node = (keypair.pubkey(), *socket); - let pingf = || Ping::::new_rand(&mut rng, &this_node).ok(); - let (check, ping) = cache.check(now, node, pingf); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(!check); assert_eq!(seen_nodes.insert(node), ping.is_some()); ping @@ -321,19 +335,18 @@ mod tests { .collect(); let now = now + Duration::from_millis(1); - let panic_ping = || -> Option> { panic!("this should not happen!") }; for ((keypair, socket), ping) in remote_nodes.iter().zip(&pings) { match ping { None => { // Already have a recent ping packets for nodes, so no new // ping packet will be generated. let node = (keypair.pubkey(), *socket); - let (check, ping) = cache.check(now, node, panic_ping); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(check); assert!(ping.is_none()); } Some(ping) => { - let pong = Pong::new(ping, keypair).unwrap(); + let pong = Pong::new(ping, keypair); assert!(cache.add(&pong, *socket, now)); } } @@ -343,7 +356,7 @@ mod tests { // All nodes now have a recent pong packet. for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let (check, ping) = cache.check(now, node, panic_ping); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(check); assert!(ping.is_none()); } @@ -354,8 +367,7 @@ mod tests { seen_nodes.clear(); for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let pingf = || Ping::::new_rand(&mut rng, &this_node).ok(); - let (check, ping) = cache.check(now, node, pingf); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(check); assert_eq!(seen_nodes.insert(node), ping.is_some()); } @@ -365,7 +377,7 @@ mod tests { // packet pending response. So no new ping packet will be created. 
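The deleted `pending_cache` was needed because every outstanding ping's token hash had to be stored; the new scheme instead recomputes the expected token from a keyed SipHash over the remote node, so verifying a pong needs only the two most recent keys. Below is a sketch of that round trip using the same `siphasher` crate; the fixed key and the stand-in node type are illustrative, not the validator's actual types.

```rust
use siphasher::sip::SipHasher24;
use std::hash::{Hash, Hasher};

// Derive the 8-byte token for a remote node from a keyed hasher. The
// hasher is Copy, so each derivation starts from the same key state.
fn token_for(hasher: SipHasher24, remote_node: &(u64, u16)) -> [u8; 8] {
    let mut hasher = hasher;
    remote_node.hash(&mut hasher);
    hasher.finish().to_le_bytes()
}

fn main() {
    let key = [7u8; 16]; // illustrative; the real cache draws keys from an RNG
    let hasher = SipHasher24::new_with_key(&key);
    let remote_node = (42u64, 8001u16); // stand-in for (Pubkey, SocketAddr)

    // Ping side: the token goes out in the ping message; no per-ping state
    // is kept beyond the rate-limit timestamp.
    let sent = token_for(hasher, &remote_node);

    // Pong side: recompute and compare. The real code compares
    // hash_ping_token(token) against the hash echoed back in the pong.
    assert_eq!(sent, token_for(hasher, &remote_node));
}
```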
for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let (check, ping) = cache.check(now, node, panic_ping); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(check); assert!(ping.is_none()); } @@ -377,8 +389,7 @@ mod tests { seen_nodes.clear(); for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let pingf = || Ping::::new_rand(&mut rng, &this_node).ok(); - let (check, ping) = cache.check(now, node, pingf); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); if seen_nodes.insert(node) { assert!(check); assert!(ping.is_some()); @@ -393,7 +404,7 @@ mod tests { // created, so no new one will be created. for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let (check, ping) = cache.check(now, node, panic_ping); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(!check); assert!(ping.is_none()); } @@ -404,8 +415,7 @@ mod tests { seen_nodes.clear(); for (keypair, socket) in &remote_nodes { let node = (keypair.pubkey(), *socket); - let pingf = || Ping::::new_rand(&mut rng, &this_node).ok(); - let (check, ping) = cache.check(now, node, pingf); + let (check, ping) = cache.check(&mut rng, &this_node, now, node); assert!(!check); assert_eq!(seen_nodes.insert(node), ping.is_some()); } diff --git a/gossip/src/protocol.rs b/gossip/src/protocol.rs index b4acd831627cad..61ee47750e04eb 100644 --- a/gossip/src/protocol.rs +++ b/gossip/src/protocol.rs @@ -43,11 +43,6 @@ const GOSSIP_PING_TOKEN_SIZE: usize = 32; pub(crate) const PULL_RESPONSE_MIN_SERIALIZED_SIZE: usize = 161; // TODO These messages should go through the gpu pipeline for spam filtering -#[cfg_attr( - feature = "frozen-abi", - derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "CBR9G92mpd1WSXEmiH6dAKHziLjJky9aYWPw6S5WmJkG") -)] #[derive(Serialize, Deserialize, Debug)] #[allow(clippy::large_enum_variant)] pub(crate) enum Protocol { @@ -63,7 +58,8 @@ pub(crate) enum Protocol { // Update count_packets_received if new variants are added here. } -pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; +pub(crate) type Ping = ping_pong::Ping; +pub(crate) type PingCache = ping_pong::PingCache; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)] diff --git a/gossip/src/weighted_shuffle.rs b/gossip/src/weighted_shuffle.rs index 656615449b2a79..4064dc63481bf3 100644 --- a/gossip/src/weighted_shuffle.rs +++ b/gossip/src/weighted_shuffle.rs @@ -1,7 +1,7 @@ //! The `weighted_shuffle` module provides an iterator over shuffled weights. use { - num_traits::CheckedAdd, + num_traits::{CheckedAdd, ConstZero}, rand::{ distributions::uniform::{SampleUniform, UniformSampler}, Rng, @@ -12,7 +12,10 @@ use { // Each internal tree node has FANOUT many child nodes with indices: // (index << BIT_SHIFT) + 1 ..= (index << BIT_SHIFT) + FANOUT // Conversely, for each node, the parent node is obtained by: -// (index - 1) >> BIT_SHIFT +// parent: (index - 1) >> BIT_SHIFT +// and the subtree weight is stored at +// offset: (index - 1) & BIT_MASK +// of its parent node. const BIT_SHIFT: usize = 4; const FANOUT: usize = 1 << BIT_SHIFT; const BIT_MASK: usize = FANOUT - 1; @@ -26,37 +29,46 @@ const BIT_MASK: usize = FANOUT - 1; /// non-zero weighted indices. #[derive(Clone)] pub struct WeightedShuffle { + // Number of "internal" nodes of the tree. + num_nodes: usize, // Underlying array implementing the tree. 
+ // Nodes without children are never accessed and don't need to be + // allocated, so tree.len() < num_nodes. // tree[i][j] is the sum of all weights in the j'th sub-tree of node i. - tree: Vec<[T; FANOUT - 1]>, + tree: Vec<[T; FANOUT]>, // Current sum of all weights, excluding already sampled ones. weight: T, // Indices of zero weighted entries. zeros: Vec, } +impl WeightedShuffle { + const ZERO: T = ::ZERO; +} + impl WeightedShuffle where - T: Copy + Default + PartialOrd + AddAssign + CheckedAdd, + T: Copy + ConstZero + PartialOrd + AddAssign + CheckedAdd, { /// If weights are negative or overflow the total sum /// they are treated as zero. pub fn new(name: &'static str, weights: &[T]) -> Self { - let zero = ::default(); - let mut tree = vec![[zero; FANOUT - 1]; get_tree_size(weights.len())]; - let mut sum = zero; + let (num_nodes, size) = get_num_nodes_and_tree_size(weights.len()); + debug_assert!(size <= num_nodes); + let mut tree = vec![[Self::ZERO; FANOUT]; size]; + let mut sum = Self::ZERO; let mut zeros = Vec::default(); - let mut num_negative = 0; - let mut num_overflow = 0; + let mut num_negative: usize = 0; + let mut num_overflow: usize = 0; for (k, &weight) in weights.iter().enumerate() { #[allow(clippy::neg_cmp_op_on_partial_ord)] // weight < zero does not work for NaNs. - if !(weight >= zero) { + if !(weight >= Self::ZERO) { zeros.push(k); num_negative += 1; continue; } - if weight == zero { + if weight == Self::ZERO { zeros.push(k); continue; } @@ -70,13 +82,11 @@ where }; // Traverse the tree from the leaf node upwards to the root, // updating the sub-tree sums along the way. - let mut index = tree.len() + k; // leaf node + let mut index = num_nodes + k; // leaf node while index != 0 { - let offset = index & BIT_MASK; + let offset = (index - 1) & BIT_MASK; index = (index - 1) >> BIT_SHIFT; // parent node - if offset > 0 { - tree[index][offset - 1] += weight; - } + tree[index][offset] += weight; } } if num_negative > 0 { @@ -86,6 +96,7 @@ where datapoint_error!("weighted-shuffle-overflow", (name, num_overflow, i64)); } Self { + num_nodes, tree, weight: sum, zeros, @@ -95,7 +106,7 @@ where impl WeightedShuffle where - T: Copy + Default + PartialOrd + AddAssign + SubAssign + Sub, + T: Copy + ConstZero + PartialOrd + AddAssign + SubAssign + Sub, { // Removes given weight at index k. fn remove(&mut self, k: usize, weight: T) { @@ -103,73 +114,49 @@ where self.weight -= weight; // Traverse the tree from the leaf node upwards to the root, // updating the sub-tree sums along the way. - let mut index = self.tree.len() + k; // leaf node + let mut index = self.num_nodes + k; // leaf node while index != 0 { - let offset = index & BIT_MASK; + let offset = (index - 1) & BIT_MASK; index = (index - 1) >> BIT_SHIFT; // parent node - if offset > 0 { - debug_assert!(self.tree[index][offset - 1] >= weight); - self.tree[index][offset - 1] -= weight; - } + debug_assert!(self.tree[index][offset] >= weight); + self.tree[index][offset] -= weight; } } // Returns smallest index such that sum of weights[..=k] > val, // along with its respective weight. fn search(&self, mut val: T) -> (/*index:*/ usize, /*weight:*/ T) { - let zero = ::default(); - debug_assert!(val >= zero); + debug_assert!(val >= Self::ZERO); debug_assert!(val < self.weight); // Traverse the tree downwards from the root while maintaining the // weight of the subtree which contains the target leaf node. 
         let mut index = 0; // root
         let mut weight = self.weight;
-        'outer: while index < self.tree.len() {
-            for (j, &node) in self.tree[index].iter().enumerate() {
+        while let Some(tree) = self.tree.get(index) {
+            for (j, &node) in tree.iter().enumerate() {
                 if val < node {
-                    // Traverse to the j+1 subtree of self.tree[index].
+                    // Traverse to the j'th subtree of self.tree[index].
                     weight = node;
                     index = (index << BIT_SHIFT) + j + 1;
-                    continue 'outer;
+                    break;
                 } else {
                     debug_assert!(weight >= node);
                     weight -= node;
                     val -= node;
                 }
             }
-            // Traverse to the right-most subtree of self.tree[index].
-            index = (index << BIT_SHIFT) + FANOUT;
         }
-        (index - self.tree.len(), weight)
+        (index - self.num_nodes, weight)
     }

     pub fn remove_index(&mut self, k: usize) {
-        // Traverse the tree from the leaf node upwards to the root, while
-        // maintaining the sum of weights of subtrees *not* containing the leaf
-        // node.
-        let mut index = self.tree.len() + k; // leaf node
-        let mut weight = <T as Default>::default(); // zero
-        while index != 0 {
-            let offset = index & BIT_MASK;
-            index = (index - 1) >> BIT_SHIFT; // parent node
-            if offset > 0 {
-                if self.tree[index][offset - 1] != weight {
-                    self.remove(k, self.tree[index][offset - 1] - weight);
-                } else {
-                    self.remove_zero(k);
-                }
-                return;
-            }
-            // The leaf node is in the right-most subtree of self.tree[index].
-            for &node in &self.tree[index] {
-                weight += node;
-            }
-        }
-        // The leaf node is the right-most node of the whole tree.
-        if self.weight != weight {
-            self.remove(k, self.weight - weight);
-        } else {
+        let index = self.num_nodes + k; // leaf node
+        let offset = (index - 1) & BIT_MASK;
+        let index = (index - 1) >> BIT_SHIFT; // parent node
+        if self.tree[index][offset] == Self::ZERO {
             self.remove_zero(k);
+        } else {
+            self.remove(k, self.tree[index][offset]);
         }
     }
@@ -182,13 +169,12 @@ where

 impl<T> WeightedShuffle<T>
 where
-    T: Copy + Default + PartialOrd + AddAssign + SampleUniform + SubAssign + Sub<Output = T>,
+    T: Copy + ConstZero + PartialOrd + AddAssign + SampleUniform + SubAssign + Sub<Output = T>,
 {
     // Equivalent to weighted_shuffle.shuffle(&mut rng).next()
     pub fn first<R: Rng>(&self, rng: &mut R) -> Option<usize> {
-        let zero = <T as Default>::default();
-        if self.weight > zero {
-            let sample = <T as SampleUniform>::Sampler::sample_single(zero, self.weight, rng);
+        if self.weight > Self::ZERO {
+            let sample = <T as SampleUniform>::Sampler::sample_single(Self::ZERO, self.weight, rng);
             let (index, _weight) = WeightedShuffle::search(self, sample);
             return Some(index);
         }
@@ -202,13 +188,13 @@ where

 impl<'a, T: 'a> WeightedShuffle<T>
 where
-    T: Copy + Default + PartialOrd + AddAssign + SampleUniform + SubAssign + Sub<Output = T>,
+    T: Copy + ConstZero + PartialOrd + AddAssign + SampleUniform + SubAssign + Sub<Output = T>,
 {
     pub fn shuffle<R: Rng>(mut self, rng: &'a mut R) -> impl Iterator<Item = usize> + 'a {
         std::iter::from_fn(move || {
-            let zero = <T as Default>::default();
-            if self.weight > zero {
-                let sample = <T as SampleUniform>::Sampler::sample_single(zero, self.weight, rng);
+            if self.weight > Self::ZERO {
+                let sample =
+                    <T as SampleUniform>::Sampler::sample_single(Self::ZERO, self.weight, rng);
                 let (index, weight) = WeightedShuffle::search(&self, sample);
                 self.remove(index, weight);
                 return Some(index);
@@ -223,16 +209,18 @@ where
         }
     }
 }

-// Maps number of items to the "internal" size of the tree
+// Maps number of items to the number of "internal" nodes of the tree
 // which "implicitly" holds those items on the leaves.
-fn get_tree_size(count: usize) -> usize {
-    let mut size = if count == 1 { 1 } else { 0 };
-    let mut nodes = 1;
-    while nodes < count {
+// Nodes without children are never accessed and don't need to be
+// allocated, so the tree size (the second number returned) can be
+// smaller than the number of nodes.
+fn get_num_nodes_and_tree_size(count: usize) -> (/*num_nodes:*/ usize, /*tree_size:*/ usize) {
+    let mut size: usize = 0;
+    let mut nodes: usize = 1;
+    while nodes * FANOUT < count {
         size += nodes;
         nodes *= FANOUT;
     }
-    size
+    (size + nodes, size + (count + FANOUT - 1) / FANOUT)
 }

 #[cfg(test)]
@@ -278,19 +266,25 @@ mod tests {
     }

     #[test]
-    fn test_get_tree_size() {
-        assert_eq!(get_tree_size(0), 0);
+    fn test_get_num_nodes_and_tree_size() {
+        assert_eq!(get_num_nodes_and_tree_size(0), (1, 0));
         for count in 1..=16 {
-            assert_eq!(get_tree_size(count), 1);
+            assert_eq!(get_num_nodes_and_tree_size(count), (1, 1));
         }
+        let num_nodes = 1 + 16;
         for count in 17..=256 {
-            assert_eq!(get_tree_size(count), 1 + 16);
+            let tree_size = 1 + (count + 15) / 16;
+            assert_eq!(get_num_nodes_and_tree_size(count), (num_nodes, tree_size));
         }
+        let num_nodes = 1 + 16 + 16 * 16;
         for count in 257..=4096 {
-            assert_eq!(get_tree_size(count), 1 + 16 + 16 * 16);
+            let tree_size = 1 + 16 + (count + 15) / 16;
+            assert_eq!(get_num_nodes_and_tree_size(count), (num_nodes, tree_size));
         }
+        let num_nodes = 1 + 16 + 16 * 16 + 16 * 16 * 16;
         for count in 4097..=65536 {
-            assert_eq!(get_tree_size(count), 1 + 16 + 16 * 16 + 16 * 16 * 16);
+            let tree_size = 1 + 16 + 16 * 16 + (count + 15) / 16;
+            assert_eq!(get_num_nodes_and_tree_size(count), (num_nodes, tree_size));
         }
     }
diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs
index eb2c7517f1eac4..4bf34b98bdbae0 100644
--- a/gossip/tests/crds_gossip.rs
+++ b/gossip/tests/crds_gossip.rs
@@ -14,7 +14,6 @@ use {
         crds_gossip_pull::{CrdsTimeouts, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
         crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
         crds_value::{CrdsValue, CrdsValueLabel},
-        ping_pong::PingCache,
     },
     solana_rayon_threadlimit::get_thread_count,
     solana_sdk::{
@@ -33,6 +32,8 @@ use {
     },
 };

+type PingCache = solana_gossip::ping_pong::PingCache<32>;
+
 #[derive(Clone)]
 struct Node {
     keypair: Arc<Keypair>,
@@ -650,6 +651,8 @@ fn build_gossip_thread_pool() -> ThreadPool {
 fn new_ping_cache() -> Mutex<PingCache> {
     let ping_cache = PingCache::new(
+        &mut rand::thread_rng(),
+        Instant::now(),
         Duration::from_secs(20 * 60), // ttl
         Duration::from_secs(20 * 60) / 64, // rate_limit_delay
         2048, // capacity
diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml
index d8e63900bd4474..cc2f5918a497e7 100644
--- a/ledger-tool/Cargo.toml
+++ b/ledger-tool/Cargo.toml
@@ -24,6 +24,7 @@
 num_cpus = { workspace = true }
 rayon = { workspace = true }
 regex = { workspace = true }
 serde = { workspace = true }
+serde_bytes = { workspace = true }
 serde_derive = { workspace = true }
 serde_json = { workspace = true }
 solana-account-decoder = { workspace = true }
@@ -46,6 +47,7 @@
 solana-program-runtime = { workspace = true }
 solana-rpc = { workspace = true }
 solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
 solana-runtime-transaction = { workspace = true }
+solana-sbpf = { workspace = true, features = ["debugger"] }
 solana-sdk = { workspace = true, features = ["openssl-vendored"] }
 solana-stake-program = { workspace = true }
 solana-storage-bigtable = { workspace = true }
@@ -55,7 +57,6 @@
 solana-type-overrides = { workspace = true }
 solana-unified-scheduler-pool = { workspace = true }
 solana-version = { workspace = true
} solana-vote-program = { workspace = true } -solana_rbpf = { workspace = true, features = ["debugger"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 2f9254df7b2f2d..8765c425f6f295 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -361,6 +361,7 @@ pub struct CliDuplicateShred { merkle_root: Option, chained_merkle_root: Option, last_in_slot: bool, + #[serde(with = "serde_bytes")] payload: Vec, } diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 463d017b17dbed..92165d4d9bab60 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -17,11 +17,11 @@ use { }, with_mock_invoke_context, }, - solana_rbpf::{ + solana_runtime::bank::Bank, + solana_sbpf::{ assembler::assemble, elf::Executable, static_analysis::Analysis, verifier::RequisiteVerifier, }, - solana_runtime::bank::Bank, solana_sdk::{ account::{create_account_shared_data_for_test, AccountSharedData}, account_utils::StateMut, @@ -60,6 +60,7 @@ struct Account { struct Input { program_id: String, accounts: Vec, + #[serde(with = "serde_bytes")] instruction_data: Vec, } fn load_accounts(path: &Path) -> Result { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index b746f6e4ada6fc..4762db4ca6898c 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -234,27 +234,28 @@ pub struct Blockstore { ledger_path: PathBuf, db: Arc, // Column families - address_signatures_cf: LedgerColumn, - bank_hash_cf: LedgerColumn, - block_height_cf: LedgerColumn, - blocktime_cf: LedgerColumn, - code_shred_cf: LedgerColumn, - data_shred_cf: LedgerColumn, - dead_slots_cf: LedgerColumn, - duplicate_slots_cf: LedgerColumn, - erasure_meta_cf: LedgerColumn, - index_cf: LedgerColumn, - merkle_root_meta_cf: LedgerColumn, - meta_cf: LedgerColumn, - optimistic_slots_cf: LedgerColumn, - orphans_cf: LedgerColumn, - perf_samples_cf: LedgerColumn, - program_costs_cf: LedgerColumn, - rewards_cf: LedgerColumn, - roots_cf: LedgerColumn, - transaction_memos_cf: LedgerColumn, - transaction_status_cf: LedgerColumn, - transaction_status_index_cf: LedgerColumn, + address_signatures_cf: LedgerColumn, + bank_hash_cf: LedgerColumn, + block_height_cf: LedgerColumn, + blocktime_cf: LedgerColumn, + code_shred_cf: LedgerColumn, + data_shred_cf: LedgerColumn, + dead_slots_cf: LedgerColumn, + duplicate_slots_cf: LedgerColumn, + erasure_meta_cf: LedgerColumn, + index_cf: LedgerColumn, + merkle_root_meta_cf: LedgerColumn, + meta_cf: LedgerColumn, + optimistic_slots_cf: LedgerColumn, + orphans_cf: LedgerColumn, + perf_samples_cf: LedgerColumn, + program_costs_cf: LedgerColumn, + rewards_cf: LedgerColumn, + roots_cf: LedgerColumn, + transaction_memos_cf: LedgerColumn, + transaction_status_cf: LedgerColumn, + transaction_status_index_cf: + LedgerColumn, highest_primary_index_slot: RwLock>, max_root: AtomicU64, @@ -675,7 +676,7 @@ impl Blockstore { cf_name: &str, ) -> Result, Box<[u8]>)> + '_> { let cf = self.db.cf_handle(cf_name); - let iterator = self.db.iterator_cf_raw_key(cf, IteratorMode::Start); + let iterator = self.db.iterator_cf(cf, rocksdb::IteratorMode::Start); Ok(iterator.map(|pair| pair.unwrap())) } @@ -759,11 +760,11 @@ impl Blockstore { } fn get_recovery_data_shreds<'a>( + &'a self, index: &'a Index, slot: Slot, erasure_meta: &'a ErasureMeta, prev_inserted_shreds: &'a HashMap, - data_cf: &'a LedgerColumn, ) -> impl Iterator + 'a { 
erasure_meta.data_shreds_indices().filter_map(move |i| { let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Data); @@ -773,7 +774,7 @@ impl Blockstore { if !index.data().contains(i) { return None; } - match data_cf.get_bytes((slot, i)).unwrap() { + match self.data_shred_cf.get_bytes((slot, i)).unwrap() { None => { error!( "Unable to read the data shred with slot {slot}, index {i} for shred \ @@ -788,11 +789,11 @@ impl Blockstore { } fn get_recovery_coding_shreds<'a>( + &'a self, index: &'a Index, slot: Slot, erasure_meta: &'a ErasureMeta, prev_inserted_shreds: &'a HashMap, - code_cf: &'a LedgerColumn, ) -> impl Iterator + 'a { erasure_meta.coding_shreds_indices().filter_map(move |i| { let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Code); @@ -802,7 +803,7 @@ impl Blockstore { if !index.coding().contains(i) { return None; } - match code_cf.get_bytes((slot, i)).unwrap() { + match self.code_shred_cf.get_bytes((slot, i)).unwrap() { None => { error!( "Unable to read the coding shred with slot {slot}, index {i} for shred \ @@ -817,31 +818,19 @@ impl Blockstore { } fn recover_shreds( + &self, index: &Index, erasure_meta: &ErasureMeta, prev_inserted_shreds: &HashMap, recovered_shreds: &mut Vec, - data_cf: &LedgerColumn, - code_cf: &LedgerColumn, reed_solomon_cache: &ReedSolomonCache, ) { // Find shreds for this erasure set and try recovery let slot = index.slot; - let available_shreds: Vec<_> = Self::get_recovery_data_shreds( - index, - slot, - erasure_meta, - prev_inserted_shreds, - data_cf, - ) - .chain(Self::get_recovery_coding_shreds( - index, - slot, - erasure_meta, - prev_inserted_shreds, - code_cf, - )) - .collect(); + let available_shreds: Vec<_> = self + .get_recovery_data_shreds(index, slot, erasure_meta, prev_inserted_shreds) + .chain(self.get_recovery_coding_shreds(index, slot, erasure_meta, prev_inserted_shreds)) + .collect(); if let Ok(mut result) = shred::recover(available_shreds, reed_solomon_cache) { Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len()); recovered_shreds.append(&mut result); @@ -990,13 +979,11 @@ impl Blockstore { let index = &mut index_meta_entry.index; match erasure_meta.status(index) { ErasureMetaStatus::CanRecover => { - Self::recover_shreds( + self.recover_shreds( index, erasure_meta, prev_inserted_shreds, &mut recovered_shreds, - &self.data_shred_cf, - &self.code_shred_cf, reed_solomon_cache, ); } @@ -2350,7 +2337,7 @@ impl Blockstore { self.slot_data_iterator(slot, start_index) .expect("blockstore couldn't fetch iterator") .map(|(_, bytes)| { - Shred::new_from_serialized_shred(bytes.to_vec()).map_err(|err| { + Shred::new_from_serialized_shred(Vec::from(bytes)).map_err(|err| { BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( format!("Could not reconstruct shred from shred payload: {err:?}"), ))) @@ -2410,7 +2397,7 @@ impl Blockstore { ) -> std::result::Result, shred::Error> { self.slot_coding_iterator(slot, start_index) .expect("blockstore couldn't fetch iterator") - .map(|code| Shred::new_from_serialized_shred(code.1.to_vec())) + .map(|(_, bytes)| Shred::new_from_serialized_shred(Vec::from(bytes))) .collect() } diff --git a/ledger/src/blockstore/blockstore_purge.rs b/ledger/src/blockstore/blockstore_purge.rs index 8b7895612b3a6d..66cf51e7970e63 100644 --- a/ledger/src/blockstore/blockstore_purge.rs +++ b/ledger/src/blockstore/blockstore_purge.rs @@ -804,18 +804,17 @@ pub mod tests { } fn get_index_bounds(blockstore: &Blockstore) -> (Box<[u8]>, Box<[u8]>) { - let first_index = 
{ - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); - status_entry_iterator.next().unwrap().unwrap().0 - }; - let last_index = { - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::End); - status_entry_iterator.next().unwrap().unwrap().0 - }; + let (first_index, _value) = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::Start) + .next() + .unwrap(); + let (last_index, _value) = blockstore + .transaction_status_cf + .iterator_cf_raw_key(IteratorMode::End) + .next() + .unwrap(); + (first_index, last_index) } @@ -866,22 +865,22 @@ pub mod tests { .put(1, &index1) .unwrap(); - let statuses: Vec<_> = blockstore + let num_statuses = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start) - .collect(); - assert_eq!(statuses.len(), 15); + .iter(IteratorMode::Start) + .unwrap() + .count(); + assert_eq!(num_statuses, 15); // Delete some of primary-index 0 let oldest_slot = 3; purge(&blockstore, oldest_slot); let status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for entry in status_entry_iterator { - let (key, _value) = entry.unwrap(); - let (_signature, slot) = ::index(&key); + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } @@ -892,11 +891,10 @@ pub mod tests { purge(&blockstore, oldest_slot); let status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for entry in status_entry_iterator { - let (key, _value) = entry.unwrap(); - let (_signature, slot) = ::index(&key); + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } @@ -907,11 +905,10 @@ pub mod tests { purge(&blockstore, oldest_slot); let status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for entry in status_entry_iterator { - let (key, _value) = entry.unwrap(); - let (_signature, slot) = ::index(&key); + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } @@ -922,11 +919,10 @@ pub mod tests { purge(&blockstore, oldest_slot); let status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for entry in status_entry_iterator { - let (key, _value) = entry.unwrap(); - let (_signature, slot) = ::index(&key); + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } @@ -937,11 +933,10 @@ pub mod tests { purge(&blockstore, oldest_slot); let status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for entry in status_entry_iterator { - let (key, _value) = entry.unwrap(); - let (_signature, slot) = ::index(&key); + for ((_signature, slot), _value) in status_entry_iterator { assert!(slot >= oldest_slot); count += 1; } @@ -952,7 +947,8 @@ pub mod tests { purge(&blockstore, oldest_slot); let mut status_entry_iterator = blockstore .transaction_status_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); 
assert!(status_entry_iterator.next().is_none()); } @@ -993,27 +989,13 @@ pub mod tests { let max_slot = 19; clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); - let first_index = { - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iter(IteratorMode::Start) - .unwrap(); - status_entry_iterator.next().unwrap().0 - }; - let last_index = { - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iter(IteratorMode::End) - .unwrap(); - status_entry_iterator.next().unwrap().0 - }; + let (first_index, last_index) = get_index_bounds(&blockstore); let oldest_slot = 3; blockstore.db.set_oldest_slot(oldest_slot); - blockstore.transaction_status_cf.compact_range_raw_key( - &cf::TransactionStatus::key(first_index), - &cf::TransactionStatus::key(last_index), - ); + blockstore + .transaction_status_cf + .compact_range_raw_key(&first_index, &last_index); let status_entry_iterator = blockstore .transaction_status_cf @@ -1027,27 +1009,13 @@ pub mod tests { assert_eq!(count, max_slot - (oldest_slot - 1)); clear_and_repopulate_transaction_statuses_for_test(&blockstore, max_slot); - let first_index = { - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iter(IteratorMode::Start) - .unwrap(); - status_entry_iterator.next().unwrap().0 - }; - let last_index = { - let mut status_entry_iterator = blockstore - .transaction_status_cf - .iter(IteratorMode::End) - .unwrap(); - status_entry_iterator.next().unwrap().0 - }; + let (first_index, last_index) = get_index_bounds(&blockstore); let oldest_slot = 12; blockstore.db.set_oldest_slot(oldest_slot); - blockstore.transaction_status_cf.compact_range_raw_key( - &cf::TransactionStatus::key(first_index), - &cf::TransactionStatus::key(last_index), - ); + blockstore + .transaction_status_cf + .compact_range_raw_key(&first_index, &last_index); let status_entry_iterator = blockstore .transaction_status_cf @@ -1103,33 +1071,28 @@ pub mod tests { ) .unwrap(); - let first_index = { - let mut memos_iterator = blockstore - .transaction_memos_cf - .iterator_cf_raw_key(IteratorMode::Start); - memos_iterator.next().unwrap().unwrap().0 - }; - let last_index = { - let mut memos_iterator = blockstore - .transaction_memos_cf - .iterator_cf_raw_key(IteratorMode::End); - memos_iterator.next().unwrap().unwrap().0 - }; + let (first_index, _value) = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::Start) + .next() + .unwrap(); + let (last_index, _value) = blockstore + .transaction_memos_cf + .iterator_cf_raw_key(IteratorMode::End) + .next() + .unwrap(); // Purge at slot 0 should not affect any memos blockstore.db.set_oldest_slot(0); blockstore .transaction_memos_cf .compact_range_raw_key(&first_index, &last_index); - let memos_iterator = blockstore + let num_memos = blockstore .transaction_memos_cf - .iterator_cf_raw_key(IteratorMode::Start); - let mut count = 0; - for item in memos_iterator { - let _item = item.unwrap(); - count += 1; - } - assert_eq!(count, 4); + .iter(IteratorMode::Start) + .unwrap() + .count(); + assert_eq!(num_memos, 4); // Purge at oldest_slot without clean_slot_0 only purges the current memo at slot 4 blockstore.db.set_oldest_slot(oldest_slot); @@ -1138,11 +1101,10 @@ pub mod tests { .compact_range_raw_key(&first_index, &last_index); let memos_iterator = blockstore .transaction_memos_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for item in memos_iterator { - let (key, _value) = item.unwrap(); - let 
slot = ::index(&key).1; + for ((_signature, slot), _value) in memos_iterator { assert!(slot == 0 || slot >= oldest_slot); count += 1; } @@ -1155,11 +1117,10 @@ pub mod tests { .compact_range_raw_key(&first_index, &last_index); let memos_iterator = blockstore .transaction_memos_cf - .iterator_cf_raw_key(IteratorMode::Start); + .iter(IteratorMode::Start) + .unwrap(); let mut count = 0; - for item in memos_iterator { - let (key, _value) = item.unwrap(); - let slot = ::index(&key).1; + for ((_signature, slot), _value) in memos_iterator { assert!(slot >= oldest_slot); count += 1; } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 7e85a8bb49e2ff..f05cefd8d5c526 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -35,6 +35,7 @@ use { ffi::{CStr, CString}, fs, marker::PhantomData, + mem, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -622,7 +623,7 @@ impl Rocks { } } - pub(crate) fn column(self: &Arc) -> LedgerColumn + pub(crate) fn column(self: &Arc) -> LedgerColumn where C: Column + ColumnName, { @@ -694,36 +695,11 @@ impl Rocks { Ok(()) } - fn iterator_cf(&self, cf: &ColumnFamily, iterator_mode: IteratorMode) -> DBIterator - where - C: Column, - { - let start_key; - let iterator_mode = match iterator_mode { - IteratorMode::From(start_from, direction) => { - start_key = C::key(start_from); - RocksIteratorMode::From(&start_key, direction) - } - IteratorMode::Start => RocksIteratorMode::Start, - IteratorMode::End => RocksIteratorMode::End, - }; - self.db.iterator_cf(cf, iterator_mode) - } - - pub(crate) fn iterator_cf_raw_key( + pub(crate) fn iterator_cf( &self, cf: &ColumnFamily, - iterator_mode: IteratorMode>, + iterator_mode: RocksIteratorMode, ) -> DBIterator { - let start_key; - let iterator_mode = match iterator_mode { - IteratorMode::From(start_from, direction) => { - start_key = start_from; - RocksIteratorMode::From(&start_key, direction) - } - IteratorMode::Start => RocksIteratorMode::Start, - IteratorMode::End => RocksIteratorMode::End, - }; self.db.iterator_cf(cf, iterator_mode) } @@ -797,8 +773,14 @@ impl Rocks { pub trait Column { type Index; + const KEY_LEN: usize; - fn key(index: Self::Index) -> Vec; + fn serialize_index(key: &mut [u8], index: Self::Index); + fn key(index: Self::Index) -> Vec { + let mut key = vec![0; Self::KEY_LEN]; + Self::serialize_index(&mut key, index); + key + } fn index(key: &[u8]) -> Self::Index; // This trait method is primarily used by `Database::delete_range_cf()`, and is therefore only // relevant for columns keyed by Slot: ie. SlotColumns and columns that feature a Slot as the @@ -846,12 +828,10 @@ pub trait SlotColumn {} impl Column for T { type Index = Slot; + const KEY_LEN: usize = mem::size_of::(); - /// Converts a u64 Index to its RocksDB key. - fn key(slot: u64) -> Vec { - let mut key = vec![0; 8]; + fn serialize_index(key: &mut [u8], slot: Self::Index) { BigEndian::write_u64(&mut key[..], slot); - key } /// Converts a RocksDB key to its u64 Index. 
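// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff above): a minimal, standalone
// illustration of the key-serialization pattern this change introduces.
// Columns declare a fixed KEY_LEN and write their index into a caller-provided
// buffer, so hot paths can build keys in a stack-allocated array instead of
// allocating a Vec per lookup, while the allocating `key()` stays available as
// a default fallback. The trait and column names below are hypothetical, not
// agave's actual types; it assumes the `byteorder` crate, which the real code
// also uses.
use byteorder::{BigEndian, ByteOrder};

type Slot = u64;

trait Column {
    type Index;
    const KEY_LEN: usize;

    // Writes `index` into `key`, which must be at least KEY_LEN bytes long.
    fn serialize_index(key: &mut [u8], index: Self::Index);

    // Allocating fallback, mirroring the default `key()` in the trait above.
    fn key(index: Self::Index) -> Vec<u8> {
        let mut key = vec![0; Self::KEY_LEN];
        Self::serialize_index(&mut key, index);
        key
    }
}

// Hypothetical column keyed by (Slot, u64), like ShredData in the diff.
struct ExampleShredColumn;

impl Column for ExampleShredColumn {
    type Index = (Slot, u64);
    const KEY_LEN: usize = std::mem::size_of::<Slot>() + std::mem::size_of::<u64>();

    fn serialize_index(key: &mut [u8], (slot, index): Self::Index) {
        // Big-endian so that lexicographic key order matches numeric order.
        BigEndian::write_u64(&mut key[..8], slot);
        BigEndian::write_u64(&mut key[8..16], index);
    }
}

fn main() {
    // Hot path: fixed-size stack buffer, no heap allocation.
    let mut key = [0u8; ExampleShredColumn::KEY_LEN];
    ExampleShredColumn::serialize_index(&mut key, (42, 7));
    assert_eq!(&key[..], &ExampleShredColumn::key((42, 7))[..]);
}
// ---------------------------------------------------------------------------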
@@ -901,12 +881,11 @@ pub trait ColumnIndexDeprecation: Column { impl Column for columns::TransactionStatus { type Index = (Signature, Slot); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); - fn key((signature, slot): Self::Index) -> Vec { - let mut key = vec![0; Self::CURRENT_INDEX_LEN]; + fn serialize_index(key: &mut [u8], (signature, slot): Self::Index) { key[0..64].copy_from_slice(&signature.as_ref()[0..64]); BigEndian::write_u64(&mut key[64..72], slot); - key } fn index(key: &[u8]) -> (Signature, Slot) { @@ -970,14 +949,16 @@ impl ColumnIndexDeprecation for columns::TransactionStatus { impl Column for columns::AddressSignatures { type Index = (Pubkey, Slot, u32, Signature); + const KEY_LEN: usize = mem::size_of::() + + mem::size_of::() + + mem::size_of::() + + mem::size_of::(); - fn key((pubkey, slot, transaction_index, signature): Self::Index) -> Vec { - let mut key = vec![0; Self::CURRENT_INDEX_LEN]; + fn serialize_index(key: &mut [u8], (pubkey, slot, transaction_index, signature): Self::Index) { key[0..32].copy_from_slice(&pubkey.as_ref()[0..32]); BigEndian::write_u64(&mut key[32..40], slot); BigEndian::write_u32(&mut key[40..44], transaction_index); key[44..108].copy_from_slice(&signature.as_ref()[0..64]); - key } fn index(key: &[u8]) -> Self::Index { @@ -1042,12 +1023,11 @@ impl ColumnIndexDeprecation for columns::AddressSignatures { impl Column for columns::TransactionMemos { type Index = (Signature, Slot); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); - fn key((signature, slot): Self::Index) -> Vec { - let mut key = vec![0; Self::CURRENT_INDEX_LEN]; + fn serialize_index(key: &mut [u8], (signature, slot): Self::Index) { key[0..64].copy_from_slice(&signature.as_ref()[0..64]); BigEndian::write_u64(&mut key[64..72], slot); - key } fn index(key: &[u8]) -> Self::Index { @@ -1097,11 +1077,10 @@ impl ColumnIndexDeprecation for columns::TransactionMemos { impl Column for columns::TransactionStatusIndex { type Index = u64; + const KEY_LEN: usize = mem::size_of::(); - fn key(index: u64) -> Vec { - let mut key = vec![0; 8]; + fn serialize_index(key: &mut [u8], index: Self::Index) { BigEndian::write_u64(&mut key[..], index); - key } fn index(key: &[u8]) -> u64 { @@ -1157,11 +1136,10 @@ impl TypedColumn for columns::ProgramCosts { } impl Column for columns::ProgramCosts { type Index = Pubkey; + const KEY_LEN: usize = mem::size_of::(); - fn key(pubkey: Pubkey) -> Vec { - let mut key = vec![0; 32]; // size_of Pubkey + fn serialize_index(key: &mut [u8], pubkey: Self::Index) { key[0..32].copy_from_slice(&pubkey.as_ref()[0..32]); - key } fn index(key: &[u8]) -> Self::Index { @@ -1179,9 +1157,11 @@ impl Column for columns::ProgramCosts { impl Column for columns::ShredCode { type Index = (Slot, u64); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); - fn key(index: (Slot, u64)) -> Vec { - columns::ShredData::key(index) + fn serialize_index(key: &mut [u8], index: Self::Index) { + // ShredCode and ShredData have the same key format + columns::ShredData::serialize_index(key, index); } fn index(key: &[u8]) -> (Slot, u64) { @@ -1202,12 +1182,11 @@ impl ColumnName for columns::ShredCode { impl Column for columns::ShredData { type Index = (Slot, u64); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); - fn key((slot, index): (Slot, u64)) -> Vec { - let mut key = vec![0; 16]; + fn serialize_index(key: &mut [u8], (slot, index): Self::Index) { BigEndian::write_u64(&mut key[..8], slot); BigEndian::write_u64(&mut key[8..16], index); - key } fn index(key: 
&[u8]) -> (Slot, u64) { @@ -1310,6 +1289,12 @@ impl TypedColumn for columns::SlotMeta { impl Column for columns::ErasureMeta { type Index = (Slot, u64); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); + + fn serialize_index(key: &mut [u8], (slot, set_index): Self::Index) { + BigEndian::write_u64(&mut key[..8], slot); + BigEndian::write_u64(&mut key[8..16], set_index); + } fn index(key: &[u8]) -> (Slot, u64) { let slot = BigEndian::read_u64(&key[..8]); @@ -1318,13 +1303,6 @@ impl Column for columns::ErasureMeta { (slot, set_index) } - fn key((slot, set_index): (Slot, u64)) -> Vec { - let mut key = vec![0; 16]; - BigEndian::write_u64(&mut key[..8], slot); - BigEndian::write_u64(&mut key[8..], set_index); - key - } - fn slot(index: Self::Index) -> Slot { index.0 } @@ -1350,6 +1328,12 @@ impl TypedColumn for columns::OptimisticSlots { impl Column for columns::MerkleRootMeta { type Index = (Slot, /*fec_set_index:*/ u32); + const KEY_LEN: usize = mem::size_of::() + mem::size_of::(); + + fn serialize_index(key: &mut [u8], (slot, fec_set_index): Self::Index) { + BigEndian::write_u64(&mut key[..8], slot); + BigEndian::write_u32(&mut key[8..], fec_set_index); + } fn index(key: &[u8]) -> Self::Index { let slot = BigEndian::read_u64(&key[..8]); @@ -1358,13 +1342,6 @@ impl Column for columns::MerkleRootMeta { (slot, fec_set_index) } - fn key((slot, fec_set_index): Self::Index) -> Vec { - let mut key = vec![0; 12]; - BigEndian::write_u64(&mut key[..8], slot); - BigEndian::write_u32(&mut key[8..], fec_set_index); - key - } - fn slot((slot, _fec_set_index): Self::Index) -> Slot { slot } @@ -1382,7 +1359,7 @@ impl TypedColumn for columns::MerkleRootMeta { } #[derive(Debug)] -pub struct LedgerColumn +pub struct LedgerColumn where C: Column + ColumnName, { @@ -1393,7 +1370,7 @@ where write_perf_status: PerfSamplingStatus, } -impl LedgerColumn { +impl LedgerColumn { pub fn submit_rocksdb_cf_metrics(&self) { let cf_rocksdb_metrics = BlockstoreRocksDbColumnFamilyMetrics { total_sst_files_size: self @@ -1470,16 +1447,26 @@ impl WriteBatch { } } -impl LedgerColumn +impl LedgerColumn where C: Column + ColumnName, { - pub fn get_bytes(&self, key: C::Index) -> Result>> { + #[inline] + fn key_from_index(index: C::Index) -> [u8; K] { + let mut key = [0u8; K]; + C::serialize_index(&mut key, index); + key + } + + pub fn get_bytes(&self, index: C::Index) -> Result>> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.read_perf_status, ); - let result = self.backend.get_cf(self.handle(), &C::key(key)); + + let key = Self::key_from_index(index); + let result = self.backend.get_cf(self.handle(), &key); + if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_read_perf( C::NAME, @@ -1524,8 +1511,17 @@ where &self, iterator_mode: IteratorMode, ) -> Result)> + '_> { - let cf = self.handle(); - let iter = self.backend.iterator_cf::(cf, iterator_mode); + let mut start_key = [0u8; K]; + let iterator_mode = match iterator_mode { + IteratorMode::Start => RocksIteratorMode::Start, + IteratorMode::End => RocksIteratorMode::End, + IteratorMode::From(start, direction) => { + C::serialize_index(&mut start_key, start); + RocksIteratorMode::From(&start_key, direction) + } + }; + + let iter = self.backend.iterator_cf(self.handle(), iterator_mode); Ok(iter.map(|pair| { let (key, value) = pair.unwrap(); (C::index(&key), value) @@ -1536,17 +1532,20 @@ where where C::Index: PartialOrd + Copy, { - let cf = self.handle(); - let from = Some(C::key(C::as_index(from))); - 
let to = Some(C::key(C::as_index(to))); - self.backend.db.compact_range_cf(cf, from, to); + let from_key = Self::key_from_index(C::as_index(from)); + let to_key = Self::key_from_index(C::as_index(to)); + + self.backend + .db + .compact_range_cf(self.handle(), Some(&from_key), Some(&to_key)); Ok(true) } #[cfg(test)] pub fn compact_range_raw_key(&self, from: &[u8], to: &[u8]) { - let cf = self.handle(); - self.backend.db.compact_range_cf(cf, Some(from), Some(to)); + self.backend + .db + .compact_range_cf(self.handle(), Some(from), Some(to)); } #[inline] @@ -1561,12 +1560,15 @@ where Ok(!iter.valid()) } - pub fn put_bytes(&self, key: C::Index, value: &[u8]) -> Result<()> { + pub fn put_bytes(&self, index: C::Index, value: &[u8]) -> Result<()> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.write_perf_status, ); - let result = self.backend.put_cf(self.handle(), &C::key(key), value); + + let key = Self::key_from_index(index); + let result = self.backend.put_cf(self.handle(), &key, value); + if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_write_perf( C::NAME, @@ -1581,10 +1583,10 @@ where pub fn put_bytes_in_batch( &self, batch: &mut WriteBatch, - key: C::Index, + index: C::Index, value: &[u8], ) -> Result<()> { - let key = C::key(key); + let key = Self::key_from_index(index); batch.put_cf(self.handle(), &key, value) } @@ -1597,12 +1599,15 @@ where self.backend.get_int_property_cf(self.handle(), name) } - pub fn delete(&self, key: C::Index) -> Result<()> { + pub fn delete(&self, index: C::Index) -> Result<()> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.write_perf_status, ); - let result = self.backend.delete_cf(self.handle(), &C::key(key)); + + let key = Self::key_from_index(index); + let result = self.backend.delete_cf(self.handle(), &key); + if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_write_perf( C::NAME, @@ -1614,8 +1619,8 @@ where result } - pub fn delete_in_batch(&self, batch: &mut WriteBatch, key: C::Index) -> Result<()> { - let key = C::key(key); + pub fn delete_in_batch(&self, batch: &mut WriteBatch, index: C::Index) -> Result<()> { + let key = Self::key_from_index(index); batch.delete_cf(self.handle(), &key) } @@ -1631,8 +1636,10 @@ where // // For consistency, we make our delete_range_cf works for [from, to] by // adjusting the `to` slot range by 1. 
- let from_key = C::key(C::as_index(from)); - let to_key = C::key(C::as_index(to.saturating_add(1))); + let mut from_key = [0u8; K]; + C::serialize_index(&mut from_key, C::as_index(from)); + let mut to_key = [0u8; K]; + C::serialize_index(&mut to_key, C::as_index(to.saturating_add(1))); batch.delete_range_cf(self.handle(), &from_key, &to_key) } @@ -1641,15 +1648,16 @@ where where C: Column + ColumnName, { - self.backend.delete_file_in_range_cf( - self.handle(), - &C::key(C::as_index(from)), - &C::key(C::as_index(to)), - ) + let mut from_key = [0u8; K]; + C::serialize_index(&mut from_key, C::as_index(from)); + let mut to_key = [0u8; K]; + C::serialize_index(&mut to_key, C::as_index(to)); + self.backend + .delete_file_in_range_cf(self.handle(), &from_key, &to_key) } } -impl LedgerColumn +impl LedgerColumn where C: TypedColumn + ColumnName, { @@ -1682,8 +1690,9 @@ where } } - pub fn get(&self, key: C::Index) -> Result> { - self.get_raw(&C::key(key)) + pub fn get(&self, index: C::Index) -> Result> { + let key = Self::key_from_index(index); + self.get_raw(&key) } pub fn get_raw(&self, key: &[u8]) -> Result> { @@ -1708,16 +1717,15 @@ where result } - pub fn put(&self, key: C::Index, value: &C::Type) -> Result<()> { + pub fn put(&self, index: C::Index, value: &C::Type) -> Result<()> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.write_perf_status, ); let serialized_value = serialize(value)?; - let result = self - .backend - .put_cf(self.handle(), &C::key(key), &serialized_value); + let key = Self::key_from_index(index); + let result = self.backend.put_cf(self.handle(), &key, &serialized_value); if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_write_perf( @@ -1733,24 +1741,25 @@ where pub fn put_in_batch( &self, batch: &mut WriteBatch, - key: C::Index, + index: C::Index, value: &C::Type, ) -> Result<()> { - let key = C::key(key); + let key = Self::key_from_index(index); let serialized_value = serialize(value)?; batch.put_cf(self.handle(), &key, &serialized_value) } } -impl LedgerColumn +impl LedgerColumn where C: ProtobufColumn + ColumnName, { pub fn get_protobuf_or_bincode>( &self, - key: C::Index, + index: C::Index, ) -> Result> { - self.get_raw_protobuf_or_bincode::(&C::key(key)) + let key = Self::key_from_index(index); + self.get_raw_protobuf_or_bincode::(&key) } pub(crate) fn get_raw_protobuf_or_bincode>( @@ -1782,12 +1791,15 @@ where } } - pub fn get_protobuf(&self, key: C::Index) -> Result> { + pub fn get_protobuf(&self, index: C::Index) -> Result> { let is_perf_enabled = maybe_enable_rocksdb_perf( self.column_options.rocks_perf_sample_interval, &self.read_perf_status, ); - let result = self.backend.get_pinned_cf(self.handle(), &C::key(key)); + + let key = Self::key_from_index(index); + let result = self.backend.get_pinned_cf(self.handle(), &key); + if let Some(op_start_instant) = is_perf_enabled { report_rocksdb_read_perf( C::NAME, @@ -1804,7 +1816,7 @@ where } } - pub fn put_protobuf(&self, key: C::Index, value: &C::Type) -> Result<()> { + pub fn put_protobuf(&self, index: C::Index, value: &C::Type) -> Result<()> { let mut buf = Vec::with_capacity(value.encoded_len()); value.encode(&mut buf)?; @@ -1812,7 +1824,10 @@ where self.column_options.rocks_perf_sample_interval, &self.write_perf_status, ); - let result = self.backend.put_cf(self.handle(), &C::key(key), &buf); + + let key = Self::key_from_index(index); + let result = self.backend.put_cf(self.handle(), &key, &buf); + if let Some(op_start_instant) = 
is_perf_enabled { report_rocksdb_write_perf( C::NAME, @@ -1826,7 +1841,7 @@ where } } -impl LedgerColumn +impl LedgerColumn where C: ColumnIndexDeprecation + ColumnName, { @@ -1834,8 +1849,17 @@ where &self, iterator_mode: IteratorMode, ) -> Result)> + '_> { - let cf = self.handle(); - let iter = self.backend.iterator_cf::(cf, iterator_mode); + let mut start_key = [0u8; K]; + let iterator_mode = match iterator_mode { + IteratorMode::Start => RocksIteratorMode::Start, + IteratorMode::End => RocksIteratorMode::End, + IteratorMode::From(start, direction) => { + C::serialize_index(&mut start_key, start); + RocksIteratorMode::From(&start_key, direction) + } + }; + + let iter = self.backend.iterator_cf(self.handle(), iterator_mode); Ok(iter.filter_map(|pair| { let (key, value) = pair.unwrap(); C::try_current_index(&key).ok().map(|index| (index, value)) @@ -1846,17 +1870,18 @@ where &self, iterator_mode: IteratorMode, ) -> Result)> + '_> { - let cf = self.handle(); - let iterator_mode_raw_key = match iterator_mode { - IteratorMode::Start => IteratorMode::Start, - IteratorMode::End => IteratorMode::End, + let start_key; + let iterator_mode = match iterator_mode { + IteratorMode::Start => RocksIteratorMode::Start, + IteratorMode::End => RocksIteratorMode::End, IteratorMode::From(start_from, direction) => { - let raw_key = C::deprecated_key(start_from); - IteratorMode::From(raw_key, direction) + start_key = C::deprecated_key(start_from); + RocksIteratorMode::From(&start_key, direction) } }; - let iter = self.backend.iterator_cf_raw_key(cf, iterator_mode_raw_key); - Ok(iter.filter_map(|pair| { + + let iterator = self.backend.iterator_cf(self.handle(), iterator_mode); + Ok(iterator.filter_map(|pair| { let (key, value) = pair.unwrap(); C::try_deprecated_index(&key) .ok() @@ -1867,9 +1892,9 @@ where pub(crate) fn delete_deprecated_in_batch( &self, batch: &mut WriteBatch, - key: C::DeprecatedIndex, + index: C::DeprecatedIndex, ) -> Result<()> { - let key = C::deprecated_key(key); + let key = C::deprecated_key(index); batch.delete_cf(self.handle(), &key) } } @@ -2198,43 +2223,47 @@ pub mod tests { } } - impl LedgerColumn + impl LedgerColumn where C: ColumnIndexDeprecation + ProtobufColumn + ColumnName, { pub fn put_deprecated_protobuf( &self, - key: C::DeprecatedIndex, + index: C::DeprecatedIndex, value: &C::Type, ) -> Result<()> { let mut buf = Vec::with_capacity(value.encoded_len()); value.encode(&mut buf)?; self.backend - .put_cf(self.handle(), &C::deprecated_key(key), &buf) + .put_cf(self.handle(), &C::deprecated_key(index), &buf) } } - impl LedgerColumn + impl LedgerColumn where C: ColumnIndexDeprecation + TypedColumn + ColumnName, { - pub fn put_deprecated(&self, key: C::DeprecatedIndex, value: &C::Type) -> Result<()> { + pub fn put_deprecated(&self, index: C::DeprecatedIndex, value: &C::Type) -> Result<()> { let serialized_value = serialize(value)?; self.backend - .put_cf(self.handle(), &C::deprecated_key(key), &serialized_value) + .put_cf(self.handle(), &C::deprecated_key(index), &serialized_value) } } - impl LedgerColumn + impl LedgerColumn where C: ColumnIndexDeprecation + ColumnName, { pub(crate) fn iterator_cf_raw_key( &self, - iterator_mode: IteratorMode>, - ) -> DBIterator { - let cf = self.handle(); - self.backend.iterator_cf_raw_key(cf, iterator_mode) + iterator_mode: IteratorMode, + ) -> impl Iterator, Box<[u8]>)> + '_ { + // The conversion of key back into Box<[u8]> incurs an extra + // allocation. 
However, this is test code and the goal is to + // maximize code reuse over efficiency + self.iter(iterator_mode) + .unwrap() + .map(|(key, value)| (Box::from(C::key(key)), value)) } } } diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs index c2e3acc60751a3..6f443f91740576 100644 --- a/local-cluster/src/cluster.rs +++ b/local-cluster/src/cluster.rs @@ -40,10 +40,10 @@ impl ClusterValidatorInfo { pub trait Cluster { fn get_node_pubkeys(&self) -> Vec; - fn get_validator_client(&self, pubkey: &Pubkey) -> Option; - fn build_tpu_quic_client(&self) -> Result; - fn build_tpu_quic_client_with_commitment( + fn build_validator_tpu_quic_client(&self, pubkey: &Pubkey) -> Result; + fn build_validator_tpu_quic_client_with_commitment( &self, + pubkey: &Pubkey, commitment_config: CommitmentConfig, ) -> Result; fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>; diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 35fcc5f2ed9333..ce3d82dcddfe84 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -61,7 +61,7 @@ use { collections::HashMap, io::{Error, ErrorKind, Result}, iter, - net::{IpAddr, Ipv4Addr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::{Path, PathBuf}, sync::{Arc, RwLock}, time::Instant, @@ -493,7 +493,9 @@ impl LocalCluster { mut voting_keypair: Option>, socket_addr_space: SocketAddrSpace, ) -> Pubkey { - let client = self.build_tpu_quic_client().expect("tpu_client"); + let client = self + .build_validator_tpu_quic_client(self.entry_point_info.pubkey()) + .expect("tpu_client"); // Must have enough tokens to fund vote account and set delegate let should_create_vote_pubkey = voting_keypair.is_none(); @@ -592,7 +594,9 @@ impl LocalCluster { } pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 { - let client = self.build_tpu_quic_client().expect("new tpu quic client"); + let client = self + .build_validator_tpu_quic_client(self.entry_point_info.pubkey()) + .expect("new tpu quic client"); Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports) } @@ -941,12 +945,12 @@ impl LocalCluster { } } - fn build_tpu_client(&self, rpc_client_builder: F) -> Result - where - F: FnOnce(String) -> Arc, - { - let rpc_pubsub_url = format!("ws://{}/", self.entry_point_info.rpc_pubsub().unwrap()); - let rpc_url = format!("http://{}", self.entry_point_info.rpc().unwrap()); + fn build_tpu_client( + &self, + rpc_client: Arc, + rpc_pubsub_addr: SocketAddr, + ) -> Result { + let rpc_pubsub_url = format!("ws://{}/", rpc_pubsub_addr); let cache = match &*self.connection_cache { ConnectionCache::Quic(cache) => cache, @@ -959,7 +963,7 @@ impl LocalCluster { }; let tpu_client = TpuClient::new_with_connection_cache( - rpc_client_builder(rpc_url), + rpc_client, rpc_pubsub_url.as_str(), TpuClientConfig::default(), cache.clone(), @@ -975,24 +979,22 @@ impl Cluster for LocalCluster { self.validators.keys().cloned().collect() } - fn get_validator_client(&self, pubkey: &Pubkey) -> Option { - self.validators.get(pubkey).map(|_| { - self.build_tpu_quic_client() - .expect("should build tpu quic client") - }) + fn build_validator_tpu_quic_client(&self, pubkey: &Pubkey) -> Result { + let contact_info = self.get_contact_info(pubkey).unwrap(); + let rpc_url: String = format!("http://{}", contact_info.rpc().unwrap()); + let rpc_client = Arc::new(RpcClient::new(rpc_url)); + self.build_tpu_client(rpc_client, contact_info.rpc_pubsub().unwrap()) } - fn 
build_tpu_quic_client(&self) -> Result { - self.build_tpu_client(|rpc_url| Arc::new(RpcClient::new(rpc_url))) - } - - fn build_tpu_quic_client_with_commitment( + fn build_validator_tpu_quic_client_with_commitment( &self, + pubkey: &Pubkey, commitment_config: CommitmentConfig, ) -> Result { - self.build_tpu_client(|rpc_url| { - Arc::new(RpcClient::new_with_commitment(rpc_url, commitment_config)) - }) + let contact_info = self.get_contact_info(pubkey).unwrap(); + let rpc_url = format!("http://{}", contact_info.rpc().unwrap()); + let rpc_client = Arc::new(RpcClient::new_with_commitment(rpc_url, commitment_config)); + self.build_tpu_client(rpc_client, contact_info.rpc_pubsub().unwrap()) } fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo { diff --git a/local-cluster/src/local_cluster_snapshot_utils.rs b/local-cluster/src/local_cluster_snapshot_utils.rs index 3df5b61d3b8359..6240ade176c086 100644 --- a/local-cluster/src/local_cluster_snapshot_utils.rs +++ b/local-cluster/src/local_cluster_snapshot_utils.rs @@ -73,7 +73,7 @@ impl LocalCluster { ) -> NextSnapshotResult { // Get slot after which this was generated let client = self - .get_validator_client(self.entry_point_info.pubkey()) + .build_validator_tpu_quic_client(self.entry_point_info.pubkey()) .unwrap(); let last_slot = client .rpc_client() diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 7f847d848a0ada..79c6fcd08908d5 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -222,7 +222,9 @@ fn test_local_cluster_signature_subscribe() { .unwrap(); let non_bootstrap_info = cluster.get_contact_info(&non_bootstrap_id).unwrap(); - let tx_client = cluster.build_tpu_quic_client().unwrap(); + let tx_client = cluster + .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey()) + .unwrap(); let (blockhash, _) = tx_client .rpc_client() @@ -431,7 +433,9 @@ fn test_mainnet_beta_cluster_type() { .unwrap(); assert_eq!(cluster_nodes.len(), 1); - let client = cluster.build_tpu_quic_client().unwrap(); + let client = cluster + .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey()) + .unwrap(); // Programs that are available at epoch 0 for program_id in [ @@ -1002,7 +1006,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st let timer = Instant::now(); loop { let validator_current_slot = cluster - .get_validator_client(&validator_identity.pubkey()) + .build_validator_tpu_quic_client(&validator_identity.pubkey()) .unwrap() .rpc_client() .get_slot_with_commitment(CommitmentConfig::finalized()) @@ -1377,7 +1381,9 @@ fn test_snapshots_blockstore_floor() { .into_iter() .find(|x| x != cluster.entry_point_info.pubkey()) .unwrap(); - let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + let validator_client = cluster + .build_validator_tpu_quic_client(&validator_id) + .unwrap(); let mut current_slot = 0; // Let this validator run a while with repair @@ -1611,7 +1617,7 @@ fn test_no_voting() { }; let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); let client = cluster - .get_validator_client(cluster.entry_point_info.pubkey()) + .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey()) .unwrap(); loop { let last_slot = client @@ -1638,6 +1644,7 @@ fn test_no_voting() { #[test] #[serial] +#[ignore] fn test_optimistic_confirmation_violation_detection() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ 
-1675,13 +1682,16 @@ fn test_optimistic_confirmation_violation_detection() { // so that the vote on `S-1` is definitely in gossip and optimistic confirmation is // detected on slot `S-1` for sure, then stop the heavier of the two // validators - let client = cluster.get_validator_client(&node_to_restart).unwrap(); + let client = cluster + .build_validator_tpu_quic_client(&node_to_restart) + .unwrap(); let mut prev_voted_slot = 0; loop { let last_voted_slot = client .rpc_client() .get_slot_with_commitment(CommitmentConfig::processed()) .unwrap(); + info!("last voted slot: {}", last_voted_slot); if last_voted_slot > 50 { if prev_voted_slot == 0 { prev_voted_slot = last_voted_slot; @@ -1692,7 +1702,10 @@ fn test_optimistic_confirmation_violation_detection() { sleep(Duration::from_millis(100)); } + info!("exiting node"); + drop(client); let exited_validator_info = cluster.exit_node(&node_to_restart); + info!("exiting node success"); // Mark fork as dead on the heavier validator, this should make the fork effectively // dead, even though it was optimistically confirmed. The smaller validator should @@ -1730,8 +1743,11 @@ fn test_optimistic_confirmation_violation_detection() { // Wait for a root > prev_voted_slot to be set. Because the root is on a // different fork than `prev_voted_slot`, then optimistic confirmation is // violated - let client = cluster.get_validator_client(&node_to_restart).unwrap(); + let client = cluster + .build_validator_tpu_quic_client(&node_to_restart) + .unwrap(); loop { + info!("Client connecting to: {}", client.rpc_client().url()); let last_root = client .rpc_client() .get_slot_with_commitment(CommitmentConfig::finalized()) @@ -1797,7 +1813,9 @@ fn test_validator_saves_tower() { }; let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); - let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + let validator_client = cluster + .build_validator_tpu_quic_client(&validator_id) + .unwrap(); let ledger_path = cluster .validators @@ -1832,7 +1850,9 @@ fn test_validator_saves_tower() { // Restart the validator and wait for a new root cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified); - let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + let validator_client = cluster + .build_validator_tpu_quic_client(&validator_id) + .unwrap(); // Wait for the first new root let last_replayed_root = loop { @@ -1861,7 +1881,9 @@ fn test_validator_saves_tower() { .unwrap(); cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified); - let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + let validator_client = cluster + .build_validator_tpu_quic_client(&validator_id) + .unwrap(); // Wait for a new root, demonstrating the validator was able to make progress from the older `tower1` let new_root = loop { @@ -1894,7 +1916,9 @@ fn test_validator_saves_tower() { validator_info.config.require_tower = false; cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified); - let validator_client = cluster.get_validator_client(&validator_id).unwrap(); + let validator_client = cluster + .build_validator_tpu_quic_client(&validator_id) + .unwrap(); // Wait for another new root let new_root = loop { @@ -2552,11 +2576,11 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { let on_partition_start = |cluster: &mut LocalCluster, _: &mut ()| { let update_client = cluster - 
            .get_validator_client(cluster.entry_point_info.pubkey())
+            .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
             .unwrap();
         update_client_sender.send(update_client).unwrap();
         let scan_client = cluster
-            .get_validator_client(cluster.entry_point_info.pubkey())
+            .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
             .unwrap();
         scan_client_sender.send(scan_client).unwrap();
     };
@@ -2709,7 +2733,9 @@ fn test_oc_bad_signatures() {
     );

     // 3) Start up a spy to listen for and push votes to leader TPU
-    let client = cluster.build_tpu_quic_client().unwrap();
+    let client = cluster
+        .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
+        .unwrap();
     let cluster_funding_keypair = cluster.funding_keypair.insecure_clone();
     let voter_thread_sleep_ms: usize = 100;
     let num_votes_simulated = Arc::new(AtomicUsize::new(0));
@@ -3079,10 +3105,12 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
         .find(|x| x != cluster.entry_point_info.pubkey())
         .unwrap();
     let client = cluster
-        .get_validator_client(cluster.entry_point_info.pubkey())
+        .build_validator_tpu_quic_client(cluster.entry_point_info.pubkey())
         .unwrap();
     update_client_sender.send(client).unwrap();
-    let scan_client = cluster.get_validator_client(&other_validator_id).unwrap();
+    let scan_client = cluster
+        .build_validator_tpu_quic_client(&other_validator_id)
+        .unwrap();
     scan_client_sender.send(scan_client).unwrap();

     // Wait for some roots to pass
@@ -3859,7 +3887,6 @@ fn test_kill_partition_switch_threshold_progress() {
 #[test]
 #[serial]
-#[ignore]
 #[allow(unused_attributes)]
 fn test_duplicate_shreds_broadcast_leader() {
     run_duplicate_shreds_broadcast_leader(true);
@@ -3891,7 +3918,13 @@ fn run_duplicate_shreds_broadcast_leader(vote_on_duplicate: bool) {
     // Critical that bad_leader_stake + good_node_stake < DUPLICATE_THRESHOLD and that
     // bad_leader_stake + good_node_stake + our_node_stake > DUPLICATE_THRESHOLD so that
-    // our vote is the determining factor
+    // our vote is the determining factor.
+    //
+    // Also critical that bad_leader_stake > 1 - DUPLICATE_THRESHOLD, so that the leader
+    // doesn't try to dump its own block, which will happen if:
+    // 1. A version is duplicate confirmed
+    // 2. The version it played/stored into blockstore isn't the one that is duplicate
+    //    confirmed.
     let bad_leader_stake = 10_000_000 * DEFAULT_NODE_STAKE;
     // Ensure that the good_node_stake is always on the critical path, and the partition node
     // should never be on the critical path. This way, none of the bad shreds sent to the partition
@@ -3919,6 +3952,7 @@ fn run_duplicate_shreds_broadcast_leader(vote_on_duplicate: bool) {
         (bad_leader_stake + good_node_stake + our_node_stake) as f64 / total_stake as f64
             > DUPLICATE_THRESHOLD
     );
+    assert!((bad_leader_stake as f64 / total_stake as f64) >= 1.0 - DUPLICATE_THRESHOLD);

     // Important that the partition node stake is the smallest so that it gets selected
     // for the partition.
diff --git a/net/README.md b/net/README.md
index a3dd929fcf40b7..063670d0ab2597 100644
--- a/net/README.md
+++ b/net/README.md
@@ -1,17 +1,27 @@
+# Test Network Management
+The `./net/` directory in the monorepo contains scripts for creating and manipulating a test network.
+The test network allows you to run a fully isolated set of validators and clients on a configurable hardware setup.
+It's intended to be both dev and CD friendly.
-# Network Management
-This directory contains scripts useful for working with a test network.  It's
-intended to be both dev and CD friendly.

-### User Account Prerequisites
+### Cloud account prerequisites

-GCP, AWS, colo are supported.
+The test networks can run in GCP, AWS or colo. Whichever cloud provider you choose, you will need the credentials set up on your machine.

 #### GCP
-First authenticate with
+You will need a working `gcloud` command from the Google Cloud SDK;
+if you do not have it, follow the guide at [https://cloud.google.com/sdk?hl=en](https://cloud.google.com/sdk?hl=en)
+
+Before running any scripts, authenticate with
 ```bash
 $ gcloud auth login
 ```
+If you are running the scripts on a headless machine, you can use `curl` to issue the requests needed to confirm your authentication.
+
+If you are doing this for the first time, you might need to set up the project
+```bash
+gcloud config set project principal-lane-200702
+```

 #### AWS
 Obtain your credentials from the AWS IAM Console and configure the AWS CLI with
@@ -20,9 +30,57 @@
 $ aws configure
 ```
 More information on AWS CLI configuration can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-quick-configuration)

-### Metrics configuration (Optional)
-Ensure that `$(whoami)` is the name of an InfluxDB user account with enough
-access to create a new InfluxDB database. Ask mvines@ for help if needed.
+## Metrics configuration (Optional)
+Metrics collection relies on two environment variables that net.sh propagates to the remote nodes:
+ * `RUST_LOG` to enable metrics reporting in the first place
+ * `SOLANA_METRICS_CONFIG` to tell agave where to log the metrics
+
+### Preparation
+> [!NOTE]
+> Anza employees should follow the guide in Notion to set up the InfluxDB account.
+
+ * Ensure that `${host}` is the host name of an InfluxDB instance you can access, for example `https://internal-metrics.solana.com:8086`
+ * Ensure that `${user}` is the name of an InfluxDB user account with enough
+rights to create a new InfluxDB database, for example `solana`.
+
+### To set up the metrics
+You will normally only need to do this once. Once this is done, you will be able to save the metrics configuration and load it later from the environment.
+
+* Go to `./net/` in the agave repo
+* Run `./init-metrics.sh -c testnet-dev-${user} ${user}`
+ * The script will ask for a password; it is the same one you created when making a user in the InfluxDB UI
+ * Use the username from the preparation step, not your login user name
+ * If you need to set a different InfluxDB host, edit the script
+* The script will configure the database (recreating one if necessary) and append a config line at the very end of the `net/config/config` file like the following:
+ * `export SOLANA_METRICS_CONFIG="host=${host},db=testnet-dev-${user},u=${user},p=some_secret"`
+ * You can store that line somewhere and append it to the config file when you need to reuse the database.
+ * You can also store it in your shell's environment so you can run `./init-metrics.sh -e` to quickly load it
+ * Alternatively, you'll need to run `./init-metrics.sh` with appropriate arguments every time you set up a new cluster
+* Assuming no errors, your InfluxDB setup is now done.
+* For simple cases, storing `SOLANA_METRICS_CONFIG` in your env is appropriate, but you may want to use different databases for different runs of `net.sh`
+ * You can call `./init-metrics.sh` before you call `./net.sh start`; this will change the metrics config for a particular run.
+### To validate that your database and environment variables are set up correctly
+
+Note: this only works if you store `SOLANA_METRICS_CONFIG` in your shell environment
+
+```bash
+ cd ./scripts/
+ source ./configure-metrics.sh
+ # configure-metrics.sh derives and prints the InfluxDB settings, e.g.:
+ # INFLUX_HOST=https://internal-metrics.solana.com:8086
+ # INFLUX_DATABASE=testnet-dev-solana
+ # INFLUX_USERNAME=solana
+ # INFLUX_PASSWORD=********
+ ./metrics-write-datapoint.sh "testnet-deploy net-create-begin=1"
+
+ ```
+ * All commands should complete with no errors; this indicates your InfluxDB config is usable
+ * Ensure that `RUST_LOG` is set to `info` or `debug`
## Quick Start
@@ -30,17 +88,101 @@
NOTE: This example uses GCE. If you are using AWS EC2, replace `./gce.sh` with
`./ec2.sh` in the commands.
```bash
-$ cd net/
-$ ./gce.sh create -n 5 -c 1 #<-- Create a GCE testnet with 5 additional nodes (beyond the bootstrap node) and 1 client (billing starts here)
-$ ./init-metrics.sh $(whoami) #<-- Recreate a metrics database for the testnet and configure credentials
-$ ./net.sh start #<-- Deploy the network from the local workspace and start processes on all nodes including bench-tps on the client node
-$ ./ssh.sh #<-- Show a help to ssh into any testnet node to access logs/etc
-$ ./net.sh stop #<-- Stop running processes on all nodes
-$ ./gce.sh delete #<-- Dispose of the network (billing stops here)
+# In the Agave repo
+cd net/
+
+# Create a GCE testnet with 4 additional validator nodes (beyond the bootstrap node) and 1 client (billing starts here)
+./gce.sh create -n 4 -c 1
+
+# Re-create the metrics database for the testnet and write `SOLANA_METRICS_CONFIG` into the config file (skip this if you are not using metrics)
+./init-metrics.sh -c testnet-dev-${USER} ${USER}
+
+# Deploy the network from the local workspace and start processes on all nodes including bench-tps on the client node
+RUST_LOG=info ./net.sh start
+
+# Show help for ssh-ing into any testnet node to access logs/etc
+./ssh.sh
+
+# Stop running processes on all nodes
+./net.sh stop
+
+# Dispose of the network (billing stops here)
+./gce.sh delete
+```
+
+## Full guide
+* If you expect metrics to work, make sure you have configured them before proceeding
+* Go to the `./net/` directory in the agave repo
+* The `./gce.sh` command controls creation and destruction of the nodes in the test net. It does not actually run any software.
+  * `./gce.sh create -n 4 -c 2` creates a cluster with 4 validator nodes and 2 client nodes for load generation; this is a minimal viable setup for all Solana features to work
+  * If the creation succeeds, `net/config/config` will contain the config file of the testnet just created
+  * If you do not have `SOLANA_METRICS_CONFIG` set in your shell env, `gce.sh` may complain about metrics not being configured; this is perfectly fine
+  * `./gce.sh info` lists active test cluster nodes; this allows you to get their IP addresses for SSH access and/or debugging
+  * `./gce.sh delete` destroys the nodes (save the electricity and $$$: destroy your test nets the moment you no longer need them).
+  * On GCE, if you do not delete nodes, they will self-destruct in 8 hours anyway; you can configure the self-destruct timer by supplying the `--self-destruct-hours=N` argument to `gce.sh`
+  * On other cloud platforms the testnet will not self-destruct!
+* To enable metrics in the testnet, at this point you need to do one of the following:
+  * `./init-metrics.sh -c testnet-dev-${user} ${user}` to create a new metrics database from scratch
+  * Manually set `SOLANA_METRICS_CONFIG` in `./net/config/config` (which is exactly what `init-metrics.sh` does for you)
+  * `./init-metrics.sh -e` to load the metrics config from `SOLANA_METRICS_CONFIG` into the testnet config file
+* `./net.sh` controls the payload on the testnet nodes, i.e. bootstrapping, the validators and bench-tps. In principle, you can run everything by hand, but `./net.sh` makes it easier.
+  * `./net.sh start` to actually run the test network.
+    * This will upload your current sources to the bootstrap host, build them there and upload the result to all the nodes
+    * The script will take 5-10 minutes to run; in the end it should print something like
+      ```
+      --- Deployment Successful
+      Bootstrap validator deployment took 164 seconds
+      Additional validator deployment (5 validators, 0 blockstreamer nodes) took 120 seconds
+      Client deployment (1 instances) took 11 seconds
+      Network start logs in /home/sol/agave/net/log
+      ```
+    * You can also make sure it logs successful test transfers:
+      ```✅ 1 lamport(s) transferred: seq=0 time= 402ms signature=33uJtPJM6ekBGrWCgWHKw1TTQJVrLxYMe3sp2PUmSRVb21LyXn3nDbQmzsgQyihE7VP2zD2iR66Du8aDUnSSd6pb```
+  * `./net.sh start bench-tps=2="--tx_count 2500"` will start 2 clients with the bench-tps workload sending 2500 transactions per batch.
+    * The `--tx_count` argument is passed to the bench-tps program; see its manual for more options
+  * `./net.sh sanity` to test the deployment; it is also run by the start command
+  * `./net.sh stop` to stop the validators and client. This does not kill the machines, so you can study the logs etc.
+  * `./net.sh start --nobuild` will skip the source compilation; you will generally want that if you are only changing configuration files rather than code, or just want to re-run the last test.
+* To connect to the nodes:
+  * `./gce.sh info` to get the public IPs
+  * `./ssh.sh` to get a shell on the node
+  * `sudo su` will give you root access on the nodes
+  * Nodes run the latest Ubuntu LTS image
+* You can also interact with the nodes using the solana CLI; a quick health check follows the example below:
+```bash
+# source the node IP list; entries are used as ${validatorIpList[i]}
+source net/config/config
+
+# airdrop
+../target/release/solana -u http://${validatorIpList[1]}:8899 airdrop 1
+
+# check feature
+../target/release/solana -u http://${validatorIpList[1]}:8899 feature status
+
+# activate a feature (supply a feature keypair; see `solana feature activate --help`)
+../target/release/solana -u http://${validatorIpList[1]}:8899 feature activate
+
+# check the stakes on current validators
+../target/release/solana --url http://${validatorIpList[0]}:8899 validators
+```
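Building on the CLI examples above, one way to sanity-check that the cluster is actually making progress (a sketch, assuming `net/config/config` has been sourced as shown; `transaction-count` and `epoch-info` are standard solana CLI subcommands):

```bash
# the transaction count should grow between invocations while bench-tps runs
../target/release/solana -u http://${validatorIpList[0]}:8899 transaction-count

# advancing slot/epoch numbers indicate the cluster is making roots
../target/release/solana -u http://${validatorIpList[0]}:8899 epoch-info
```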
## Tips
+### Automation
+You will want a script like this pretty much immediately, to avoid making mistakes in the init process:
+```bash
+# Create the testnet with reasonable node sizes for a small test
+./gce.sh create -n4 -c2 --custom-machine-type "--machine-type n1-standard-16" --client-machine-type "--machine-type n1-standard-4"
+# Patch metrics config from env into config file
+./init-metrics.sh -e
+# Enable metrics and start the network (this will also build software)
+RUST_LOG=info ./net.sh start -c bench-tps=2="--tx_count 25000"
+```
+
+### Inscrutable "nothing works, everything times out" state
+ Note that the net.sh and `gce.sh info` commands do not actually check whether the nodes are still alive in gcloud;
+ they just assume the config file information is correct. So if your nodes got killed or timed out, these commands will lie to you. In that case, just use `gce.sh delete` to reset.
### Running the network over public IP addresses
By default private IP addresses are used with all instances in the same
availability zone to avoid GCE network egress charges. However to run the
@@ -62,6 +204,8 @@
$ ./net.sh start -t edge
```
### Enabling CUDA
+> [!NOTE]
+> CUDA is currently not available on GCE
First ensure the network instances are created with GPU enabled:
```bash
$ ./gce.sh create -g ...
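# As a possible sanity check once a GPU instance is up (an assumption on my
# part, not a documented step of this guide): ssh into the node and confirm
# the driver actually sees the GPU before starting the network.
$ nvidia-smi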
diff --git a/net/init-metrics.sh b/net/init-metrics.sh index f79cd065b17cfa..93567dcdb7ec30 100755 --- a/net/init-metrics.sh +++ b/net/init-metrics.sh @@ -83,11 +83,13 @@ else query "CREATE DATABASE \"$netBasename\"" query "ALTER RETENTION POLICY autogen ON \"$netBasename\" DURATION 7d" query "GRANT READ ON \"$netBasename\" TO \"ro\"" - query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\"" + query "GRANT WRITE ON \"$netBasename\" TO \"${username}\"" - SOLANA_METRICS_CONFIG="host=$host,db=$netBasename,u=scratch_writer,p=topsecret" + SOLANA_METRICS_CONFIG="host=$host,db=$netBasename,u=${username},p=${password}" + set +x fi +set -x echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile" exit 0 diff --git a/perf/benches/dedup.rs b/perf/benches/dedup.rs index 8d2198d319b22f..3c00baa609e363 100644 --- a/perf/benches/dedup.rs +++ b/perf/benches/dedup.rs @@ -27,7 +27,7 @@ fn do_bench_dedup_packets(bencher: &mut Bencher, mut batches: Vec) let mut rng = rand::thread_rng(); let mut deduper = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979); bencher.iter(|| { - let _ans = deduper::dedup_packets_and_count_discards(&deduper, &mut batches, |_, _, _| ()); + let _ans = deduper::dedup_packets_and_count_discards(&deduper, &mut batches); deduper.maybe_reset( &mut rng, 0.001, // false_positive_rate diff --git a/perf/benches/shrink.rs b/perf/benches/shrink.rs index 4459f5f9a6b0c3..92f377548cf2d5 100644 --- a/perf/benches/shrink.rs +++ b/perf/benches/shrink.rs @@ -79,6 +79,6 @@ fn bench_shrink_count_packets(bencher: &mut Bencher) { }); bencher.iter(|| { - let _ = sigverify::count_valid_packets(&batches, |_| ()); + let _ = sigverify::count_valid_packets(&batches); }); } diff --git a/perf/src/deduper.rs b/perf/src/deduper.rs index afb60bd9419593..b25bcc93ed4449 100644 --- a/perf/src/deduper.rs +++ b/perf/src/deduper.rs @@ -1,7 +1,7 @@ //! Utility to deduplicate baches of incoming network packets. use { - crate::packet::{Packet, PacketBatch}, + crate::packet::PacketBatch, ahash::RandomState, rand::Rng, std::{ @@ -90,23 +90,18 @@ fn new_random_state(rng: &mut R) -> RandomState { pub fn dedup_packets_and_count_discards( deduper: &Deduper, batches: &mut [PacketBatch], - mut process_received_packet: impl FnMut(&mut Packet, bool, bool), ) -> u64 { batches .iter_mut() .flat_map(PacketBatch::iter_mut) .map(|packet| { - if packet.meta().discard() { - process_received_packet(packet, true, false); - } else if packet - .data(..) - .map(|data| deduper.dedup(data)) - .unwrap_or(true) + if !packet.meta().discard() + && packet + .data(..) 
+ .map(|data| deduper.dedup(data)) + .unwrap_or(true) { packet.meta_mut().set_discard(true); - process_received_packet(packet, false, true); - } else { - process_received_packet(packet, false, false); } u64::from(packet.meta().discard()) }) @@ -118,7 +113,11 @@ pub fn dedup_packets_and_count_discards( mod tests { use { super::*, - crate::{packet::to_packet_batches, sigverify, test_tx::test_tx}, + crate::{ + packet::{to_packet_batches, Packet}, + sigverify, + test_tx::test_tx, + }, rand::SeedableRng, rand_chacha::ChaChaRng, solana_packet::{Meta, PACKET_DATA_SIZE}, @@ -134,15 +133,7 @@ mod tests { let packet_count = sigverify::count_packets_in_batches(&batches); let mut rng = rand::thread_rng(); let filter = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979); - let mut num_deduped = 0; - let discard = dedup_packets_and_count_discards( - &filter, - &mut batches, - |_deduped_packet, _removed_before_sigverify_stage, _is_dup| { - num_deduped += 1; - }, - ) as usize; - assert_eq!(num_deduped, discard + 1); + let discard = dedup_packets_and_count_discards(&filter, &mut batches) as usize; assert_eq!(packet_count, discard + 1); } @@ -151,8 +142,7 @@ mod tests { let mut rng = rand::thread_rng(); let mut filter = Deduper::<2, [u8]>::new(&mut rng, /*num_bits:*/ 63_999_979); let mut batches = to_packet_batches(&(0..1024).map(|_| test_tx()).collect::>(), 128); - let discard = - dedup_packets_and_count_discards(&filter, &mut batches, |_, _, _| ()) as usize; + let discard = dedup_packets_and_count_discards(&filter, &mut batches) as usize; // because dedup uses a threadpool, there maybe up to N threads of txs that go through assert_eq!(discard, 0); assert!(!filter.maybe_reset( @@ -182,8 +172,7 @@ mod tests { for i in 0..1000 { let mut batches = to_packet_batches(&(0..1000).map(|_| test_tx()).collect::>(), 128); - discard += - dedup_packets_and_count_discards(&filter, &mut batches, |_, _, _| ()) as usize; + discard += dedup_packets_and_count_discards(&filter, &mut batches) as usize; trace!("{} {}", i, discard); if filter.popcount.load(Ordering::Relaxed) > capacity { break; @@ -206,8 +195,7 @@ mod tests { for i in 0..10 { let mut batches = to_packet_batches(&(0..1024).map(|_| test_tx()).collect::>(), 128); - discard += - dedup_packets_and_count_discards(&filter, &mut batches, |_, _, _| ()) as usize; + discard += dedup_packets_and_count_discards(&filter, &mut batches) as usize; debug!("false positive rate: {}/{}", discard, i * 1024); } //allow for 1 false positive even if extremely unlikely diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 5394d0462b263f..e07da9bcecf732 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -20,13 +20,6 @@ use { std::{convert::TryFrom, mem::size_of}, }; -// Representing key tKeYE4wtowRb8yRroZShTipE18YVnqwXjsSAoNsFU6g -const TRACER_KEY_BYTES: [u8; 32] = [ - 13, 37, 180, 170, 252, 137, 36, 194, 183, 143, 161, 193, 201, 207, 211, 23, 189, 93, 33, 110, - 155, 90, 30, 39, 116, 115, 238, 38, 126, 21, 232, 133, -]; -const TRACER_KEY: Pubkey = Pubkey::new_from_array(TRACER_KEY_BYTES); -const TRACER_KEY_OFFSET_IN_TRANSACTION: usize = 69; // Empirically derived to constrain max verify latency to ~8ms at lower packet counts pub const VERIFY_PACKET_CHUNK_SIZE: usize = 128; @@ -153,24 +146,10 @@ pub fn count_packets_in_batches(batches: &[PacketBatch]) -> usize { batches.iter().map(|batch| batch.len()).sum() } -pub fn count_valid_packets( - batches: &[PacketBatch], - mut process_valid_packet: impl FnMut(&Packet), -) -> usize { +pub fn 
count_valid_packets(batches: &[PacketBatch]) -> usize { batches .iter() - .map(|batch| { - batch - .iter() - .filter(|p| { - let should_keep = !p.meta().discard(); - if should_keep { - process_valid_packet(p); - } - should_keep - }) - .count() - }) + .map(|batch| batch.iter().filter(|p| !p.meta().discard()).count()) .sum() } @@ -308,21 +287,6 @@ fn do_get_packet_offsets( )) } -pub fn check_for_tracer_packet(packet: &mut Packet) -> bool { - let first_pubkey_start: usize = TRACER_KEY_OFFSET_IN_TRANSACTION; - let Some(first_pubkey_end) = first_pubkey_start.checked_add(size_of::()) else { - return false; - }; - // Check for tracer pubkey - match packet.data(first_pubkey_start..first_pubkey_end) { - Some(pubkey) if pubkey == TRACER_KEY.as_ref() => { - packet.meta_mut().set_tracer(true); - true - } - _ => false, - } -} - fn get_packet_offsets( packet: &mut Packet, current_offset: usize, @@ -1484,7 +1448,7 @@ mod tests { }); start.sort_by(|a, b| a.data(..).cmp(&b.data(..))); - let packet_count = count_valid_packets(&batches, |_| ()); + let packet_count = count_valid_packets(&batches); shrink_batches(&mut batches); //make sure all the non discarded packets are the same @@ -1495,7 +1459,7 @@ mod tests { .for_each(|p| end.push(p.clone())) }); end.sort_by(|a, b| a.data(..).cmp(&b.data(..))); - let packet_count2 = count_valid_packets(&batches, |_| ()); + let packet_count2 = count_valid_packets(&batches); assert_eq!(packet_count, packet_count2); assert_eq!(start, end); } @@ -1659,13 +1623,13 @@ mod tests { PACKETS_PER_BATCH, ); assert_eq!(batches.len(), BATCH_COUNT); - assert_eq!(count_valid_packets(&batches, |_| ()), PACKET_COUNT); + assert_eq!(count_valid_packets(&batches), PACKET_COUNT); batches.iter_mut().enumerate().for_each(|(i, b)| { b.iter_mut() .enumerate() .for_each(|(j, p)| p.meta_mut().set_discard(set_discard(i, j))) }); - assert_eq!(count_valid_packets(&batches, |_| ()), *expect_valid_packets); + assert_eq!(count_valid_packets(&batches), *expect_valid_packets); debug!("show valid packets for case {}", i); batches.iter_mut().enumerate().for_each(|(i, b)| { b.iter_mut().enumerate().for_each(|(j, p)| { @@ -1679,7 +1643,7 @@ mod tests { let shrunken_batch_count = batches.len(); debug!("shrunk batch test {} count: {}", i, shrunken_batch_count); assert_eq!(shrunken_batch_count, *expect_batch_count); - assert_eq!(count_valid_packets(&batches, |_| ()), *expect_valid_packets); + assert_eq!(count_valid_packets(&batches), *expect_valid_packets); } } } diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index 0cb6a7207b6af8..ca323514669ba6 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -14,7 +14,6 @@ base64 = { workspace = true } bincode = { workspace = true } enum-iterator = { workspace = true } itertools = { workspace = true } -libc = { workspace = true } log = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } @@ -42,6 +41,7 @@ solana-metrics = { workspace = true } solana-precompiles = { workspace = true } solana-pubkey = { workspace = true } solana-rent = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk-ids = { workspace = true } solana-slot-hashes = { workspace = true } solana-stable-layout = { workspace = true } @@ -50,13 +50,11 @@ solana-sysvar-id = { workspace = true } solana-timings = { workspace = true } solana-transaction-context = { workspace = true } solana-type-overrides = { workspace = true } -solana_rbpf = { workspace = true } thiserror = { workspace = true } [dev-dependencies] 
assert_matches = { workspace = true } solana-instruction = { workspace = true, features = ["bincode"] } -solana-logger = { workspace = true } solana-pubkey = { workspace = true, features = ["rand"] } solana-transaction-context = { workspace = true, features = [ "dev-context-only-utils", @@ -78,7 +76,7 @@ frozen-abi = [ ] shuttle-test = [ "solana-type-overrides/shuttle-test", - "solana_rbpf/shuttle-test", + "solana-sbpf/shuttle-test", ] [lints] diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index db6486a6194dd6..6f11dd058c592d 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -21,7 +21,7 @@ use { solana_measure::measure::Measure, solana_precompiles::Precompile, solana_pubkey::Pubkey, - solana_rbpf::{ + solana_sbpf::{ ebpf::MM_HEAP_START, error::{EbpfError, ProgramResult}, memory_region::MemoryMapping, @@ -49,7 +49,7 @@ pub type BuiltinFunctionWithContext = BuiltinFunction>; #[macro_export] macro_rules! declare_process_instruction { ($process_instruction:ident, $cu_to_consume:expr, |$invoke_context:ident| $inner:tt) => { - $crate::solana_rbpf::declare_builtin_function!( + $crate::solana_sbpf::declare_builtin_function!( $process_instruction, fn rust( invoke_context: &mut $crate::invoke_context::InvokeContext, @@ -58,7 +58,7 @@ macro_rules! declare_process_instruction { _arg2: u64, _arg3: u64, _arg4: u64, - _memory_mapping: &mut $crate::solana_rbpf::memory_region::MemoryMapping, + _memory_mapping: &mut $crate::solana_sbpf::memory_region::MemoryMapping, ) -> std::result::Result> { fn process_instruction_inner( $invoke_context: &mut $crate::invoke_context::InvokeContext, @@ -536,7 +536,7 @@ impl<'a> InvokeContext<'a> { .ok_or(InstructionError::UnsupportedProgramId)?; let function = match &entry.program { ProgramCacheEntryType::Builtin(program) => program - .get_function_registry() + .get_function_registry(SBPFVersion::V0) .lookup_by_key(ENTRYPOINT_KEY) .map(|(_name, function)| function), _ => None, @@ -555,13 +555,13 @@ impl<'a> InvokeContext<'a> { // For now, only built-ins are invoked from here, so the VM and its Config are irrelevant. 
let mock_config = Config::default(); let empty_memory_mapping = - MemoryMapping::new(Vec::new(), &mock_config, &SBPFVersion::V1).unwrap(); + MemoryMapping::new(Vec::new(), &mock_config, SBPFVersion::V0).unwrap(); let mut vm = EbpfVm::new( self.program_cache_for_tx_batch .environments .program_runtime_v2 .clone(), - &SBPFVersion::V1, + SBPFVersion::V0, // Removes lifetime tracking unsafe { std::mem::transmute::<&mut InvokeContext, &mut InvokeContext>(self) }, empty_memory_mapping, diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 974ecee64f94d4..3adf1ae222a6b7 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -5,7 +5,7 @@ #[macro_use] extern crate solana_metrics; -pub use solana_rbpf; +pub use solana_sbpf; pub mod invoke_context; pub mod loaded_programs; pub mod mem_pool; diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 9e11477ea897bb..20399d6f1f34b0 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -5,7 +5,7 @@ use { solana_clock::{Epoch, Slot}, solana_measure::measure::Measure, solana_pubkey::Pubkey, - solana_rbpf::{ + solana_sbpf::{ elf::Executable, program::{BuiltinProgram, FunctionRegistry}, verifier::RequisiteVerifier, @@ -1370,7 +1370,7 @@ mod tests { percentage::Percentage, solana_clock::Slot, solana_pubkey::Pubkey, - solana_rbpf::{elf::Executable, program::BuiltinProgram}, + solana_sbpf::{elf::Executable, program::BuiltinProgram}, std::{ fs::File, io::Read, diff --git a/program-runtime/src/mem_pool.rs b/program-runtime/src/mem_pool.rs index a92ec1603ddb27..0c39557605bfe6 100644 --- a/program-runtime/src/mem_pool.rs +++ b/program-runtime/src/mem_pool.rs @@ -3,7 +3,7 @@ use { compute_budget::{MAX_CALL_DEPTH, MAX_INSTRUCTION_STACK_DEPTH, STACK_FRAME_SIZE}, compute_budget_limits::{MAX_HEAP_FRAME_BYTES, MIN_HEAP_FRAME_BYTES}, }, - solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, + solana_sbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, std::array, }; diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index d3df42d38abe44..3eb57298f8e281 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -30,11 +30,11 @@ solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true } solana-svm = { workspace = true } solana-timings = { workspace = true } solana-vote-program = { workspace = true } -solana_rbpf = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 1115330888d8a4..c3d0d9f5d00dd1 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -69,7 +69,7 @@ pub use { solana_banks_client::{BanksClient, BanksClientError}, solana_banks_interface::BanksTransactionResultWithMetadata, solana_program_runtime::invoke_context::InvokeContext, - solana_rbpf::{ + solana_sbpf::{ error::EbpfError, vm::{get_runtime_environment_key, EbpfVm}, }, @@ -497,7 +497,7 @@ impl Default for ProgramTest { /// fn default() -> Self { solana_logger::setup_with_default( - "solana_rbpf::vm=debug,\ + "solana_sbpf::vm=debug,\ solana_runtime::message_processor=debug,\ solana_runtime::system_instruction_processor=trace,\ solana_program_test=info", diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 
3c62ccc69fcfa1..fc99553d3fefe4 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -24,10 +24,10 @@ solana-measure = { workspace = true } solana-poseidon = { workspace = true } solana-program-memory = { workspace = true } solana-program-runtime = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true } solana-timings = { workspace = true } solana-type-overrides = { workspace = true } -solana_rbpf = { workspace = true } thiserror = { workspace = true } [dev-dependencies] @@ -48,5 +48,5 @@ targets = ["x86_64-unknown-linux-gnu"] shuttle-test = [ "solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", - "solana_rbpf/shuttle-test" + "solana-sbpf/shuttle-test" ] diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 7dbc8cdb3e1ec9..5b65d610afe18d 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -22,7 +22,7 @@ use { stable_log, sysvar_cache::get_sysvar_with_account_check, }, - solana_rbpf::{ + solana_sbpf::{ declare_builtin_function, ebpf::{self, MM_HEAP_START}, elf::Executable, @@ -326,16 +326,16 @@ macro_rules! create_vm { macro_rules! mock_create_vm { ($vm:ident, $additional_regions:expr, $accounts_metadata:expr, $invoke_context:expr $(,)?) => { let loader = solana_type_overrides::sync::Arc::new(BuiltinProgram::new_mock()); - let function_registry = solana_rbpf::program::FunctionRegistry::default(); - let executable = solana_rbpf::elf::Executable::::from_text_bytes( - &[0x95, 0, 0, 0, 0, 0, 0, 0], + let function_registry = solana_sbpf::program::FunctionRegistry::default(); + let executable = solana_sbpf::elf::Executable::::from_text_bytes( + &[0x9D, 0, 0, 0, 0, 0, 0, 0], loader, - SBPFVersion::V2, + SBPFVersion::V3, function_registry, ) .unwrap(); executable - .verify::() + .verify::() .unwrap(); $crate::create_vm!( $vm, @@ -1471,7 +1471,7 @@ pub fn execute<'a, 'b: 'a>( ProgramResult::Err(mut error) => { if invoke_context .get_feature_set() - .is_active(&solana_feature_set::apply_cost_tracker_during_replay::id()) + .is_active(&solana_feature_set::deplete_cu_meter_on_vm_failure::id()) && !matches!(error, EbpfError::SyscallError(_)) { // when an exception is thrown during the execution of a @@ -1696,7 +1696,7 @@ mod tests { let loader_id = bpf_loader::id(); let program_id = Pubkey::new_unique(); let program_account = - load_program_account_from_elf(&loader_id, "test_elfs/out/noop_aligned.so"); + load_program_account_from_elf(&loader_id, "test_elfs/out/sbpfv3_return_ok.so"); let parameter_id = Pubkey::new_unique(); let parameter_account = AccountSharedData::new(1, 0, &loader_id); let parameter_meta = AccountMeta { @@ -2184,10 +2184,10 @@ mod tests { #[test] fn test_bpf_loader_upgradeable_upgrade() { - let mut file = File::open("test_elfs/out/noop_aligned.so").expect("file open failed"); + let mut file = File::open("test_elfs/out/sbpfv3_return_ok.so").expect("file open failed"); let mut elf_orig = Vec::new(); file.read_to_end(&mut elf_orig).unwrap(); - let mut file = File::open("test_elfs/out/noop_unaligned.so").expect("file open failed"); + let mut file = File::open("test_elfs/out/sbpfv3_return_err.so").expect("file open failed"); let mut elf_new = Vec::new(); file.read_to_end(&mut elf_new).unwrap(); assert_ne!(elf_orig.len(), elf_new.len()); @@ -3783,7 +3783,7 @@ mod tests { let program_id = Pubkey::new_unique(); // Create program account - let mut file = File::open("test_elfs/out/noop_aligned.so").expect("file open failed"); + let mut file = 
File::open("test_elfs/out/sbpfv3_return_ok.so").expect("file open failed"); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); @@ -3833,7 +3833,7 @@ mod tests { invoke_context: &mut InvokeContext, program_id: Pubkey, ) -> Result<(), InstructionError> { - let mut file = File::open("test_elfs/out/noop_unaligned.so").expect("file open failed"); + let mut file = File::open("test_elfs/out/sbpfv3_return_ok.so").expect("file open failed"); let mut elf = Vec::new(); file.read_to_end(&mut elf).unwrap(); deploy_program!( diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 15c5820dc23924..e04fd730aa7c80 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -3,7 +3,7 @@ use { byteorder::{ByteOrder, LittleEndian}, solana_program_runtime::invoke_context::SerializedAccountMetadata, - solana_rbpf::{ + solana_sbpf::{ aligned_memory::{AlignedMemory, Pod}, ebpf::{HOST_ALIGN, MM_INPUT_START}, memory_region::{MemoryRegion, MemoryState}, diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index bb0179da75d14c..8492363d2f6808 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -5,7 +5,7 @@ use { solana_feature_set::{self as feature_set, enable_bpf_loader_set_authority_checked_ix}, solana_measure::measure::Measure, solana_program_runtime::invoke_context::SerializedAccountMetadata, - solana_rbpf::{ + solana_sbpf::{ ebpf, memory_region::{MemoryRegion, MemoryState}, }, @@ -1609,7 +1609,7 @@ mod tests { solana_program_runtime::{ invoke_context::SerializedAccountMetadata, with_mock_invoke_context, }, - solana_rbpf::{ + solana_sbpf::{ ebpf::MM_INPUT_START, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, }, solana_sdk::{ @@ -1710,7 +1710,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![region], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); let ins = SyscallInvokeSignedRust::translate_instruction( vm_addr, @@ -1746,7 +1746,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![region], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); let signers = SyscallInvokeSignedRust::translate_signers( &program_id, @@ -1782,7 +1782,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![region], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); let account_info = translate_type::(&memory_mapping, vm_addr, false).unwrap(); @@ -1832,7 +1832,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -1890,7 +1890,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2018,7 +2018,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2193,7 +2193,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, 
+ SBPFVersion::V3, ) .unwrap(); @@ -2263,7 +2263,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2319,7 +2319,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2392,7 +2392,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2480,7 +2480,7 @@ mod tests { let memory_mapping = MemoryMapping::new( mock_caller_account.regions.split_off(0), &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2558,7 +2558,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![region], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); mock_invoke_context!( invoke_context, diff --git a/programs/bpf_loader/src/syscalls/logging.rs b/programs/bpf_loader/src/syscalls/logging.rs index 522145b1d71408..83dcee9055cfcf 100644 --- a/programs/bpf_loader/src/syscalls/logging.rs +++ b/programs/bpf_loader/src/syscalls/logging.rs @@ -1,4 +1,4 @@ -use {super::*, solana_rbpf::vm::ContextObject}; +use {super::*, solana_sbpf::vm::ContextObject}; declare_builtin_function!( /// Log a user's info message diff --git a/programs/bpf_loader/src/syscalls/mem_ops.rs b/programs/bpf_loader/src/syscalls/mem_ops.rs index e6db37ccaa29bf..254828e3c5e2db 100644 --- a/programs/bpf_loader/src/syscalls/mem_ops.rs +++ b/programs/bpf_loader/src/syscalls/mem_ops.rs @@ -1,7 +1,7 @@ use { super::*, solana_program_runtime::invoke_context::SerializedAccountMetadata, - solana_rbpf::{error::EbpfError, memory_region::MemoryRegion}, + solana_sbpf::{error::EbpfError, memory_region::MemoryRegion}, std::slice, }; @@ -573,7 +573,7 @@ mod tests { use { super::*, assert_matches::assert_matches, - solana_rbpf::{ebpf::MM_PROGRAM_START, program::SBPFVersion}, + solana_sbpf::{ebpf::MM_RODATA_START, program::SBPFVersion}, test_case::test_case, }; @@ -591,7 +591,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); let mut src_chunk_iter = MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, 0, 1).unwrap(); @@ -605,7 +605,7 @@ mod tests { aligned_memory_mapping: false, ..Config::default() }; - let memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); let mut src_chunk_iter = MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, u64::MAX, 1).unwrap(); @@ -620,9 +620,9 @@ mod tests { }; let mem1 = vec![0xFF; 42]; let memory_mapping = MemoryMapping::new( - vec![MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START)], + vec![MemoryRegion::new_readonly(&mem1, MM_RODATA_START)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -631,34 +631,34 @@ mod tests { &memory_mapping, &[], AccessType::Load, - MM_PROGRAM_START - 1, + MM_RODATA_START - 1, 42, ) .unwrap(); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 42, "unknown") if *addr == MM_PROGRAM_START - 1 + 
EbpfError::AccessViolation(AccessType::Load, addr, 42, "unknown") if *addr == MM_RODATA_START - 1 ); // check oob at the upper bound. Since the memory mapping isn't empty, // this always happens on the second next(). let mut src_chunk_iter = - MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, MM_PROGRAM_START, 43) + MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, MM_RODATA_START, 43) .unwrap(); assert!(src_chunk_iter.next().unwrap().is_ok()); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START + EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_RODATA_START ); // check oob at the upper bound on the first next_back() let mut src_chunk_iter = - MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, MM_PROGRAM_START, 43) + MemoryChunkIterator::new(&memory_mapping, &[], AccessType::Load, MM_RODATA_START, 43) .unwrap() .rev(); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_PROGRAM_START + EbpfError::AccessViolation(AccessType::Load, addr, 43, "program") if *addr == MM_RODATA_START ); // check oob at the upper bound on the 2nd next_back() @@ -666,7 +666,7 @@ mod tests { &memory_mapping, &[], AccessType::Load, - MM_PROGRAM_START - 1, + MM_RODATA_START - 1, 43, ) .unwrap() @@ -674,7 +674,7 @@ mod tests { assert!(src_chunk_iter.next().unwrap().is_ok()); assert_matches!( src_chunk_iter.next().unwrap().unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 43, "unknown") if *addr == MM_PROGRAM_START - 1 + EbpfError::AccessViolation(AccessType::Load, addr, 43, "unknown") if *addr == MM_RODATA_START - 1 ); } @@ -686,9 +686,9 @@ mod tests { }; let mem1 = vec![0xFF; 42]; let memory_mapping = MemoryMapping::new( - vec![MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START)], + vec![MemoryRegion::new_readonly(&mem1, MM_RODATA_START)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -697,7 +697,7 @@ mod tests { &memory_mapping, &[], AccessType::Load, - MM_PROGRAM_START - 1, + MM_RODATA_START - 1, 1, ) .unwrap(); @@ -708,18 +708,18 @@ mod tests { &memory_mapping, &[], AccessType::Load, - MM_PROGRAM_START + 42, + MM_RODATA_START + 42, 1, ) .unwrap(); assert!(src_chunk_iter.next().unwrap().is_err()); for (vm_addr, len) in [ - (MM_PROGRAM_START, 0), - (MM_PROGRAM_START + 42, 0), - (MM_PROGRAM_START, 1), - (MM_PROGRAM_START, 42), - (MM_PROGRAM_START + 41, 1), + (MM_RODATA_START, 0), + (MM_RODATA_START + 42, 0), + (MM_RODATA_START, 1), + (MM_RODATA_START, 42), + (MM_RODATA_START + 41, 1), ] { for rev in [true, false] { let iter = @@ -749,22 +749,22 @@ mod tests { let mem2 = vec![0x22; 4]; let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_readonly(&mem2, MM_PROGRAM_START + 8), + MemoryRegion::new_readonly(&mem1, MM_RODATA_START), + MemoryRegion::new_readonly(&mem2, MM_RODATA_START + 8), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); for (vm_addr, len, mut expected) in [ - (MM_PROGRAM_START, 8, vec![(MM_PROGRAM_START, 8)]), + (MM_RODATA_START, 8, vec![(MM_RODATA_START, 8)]), ( - MM_PROGRAM_START + 7, + MM_RODATA_START + 7, 2, - vec![(MM_PROGRAM_START + 7, 1), (MM_PROGRAM_START + 8, 1)], + vec![(MM_RODATA_START + 7, 1), (MM_RODATA_START + 8, 1)], ), - 
(MM_PROGRAM_START + 8, 4, vec![(MM_PROGRAM_START + 8, 4)]), + (MM_RODATA_START + 8, 4, vec![(MM_RODATA_START + 8, 4)]), ] { for rev in [false, true] { let iter = @@ -792,11 +792,11 @@ mod tests { let mem2 = vec![0x22; 4]; let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_readonly(&mem2, MM_PROGRAM_START + 8), + MemoryRegion::new_readonly(&mem1, MM_RODATA_START), + MemoryRegion::new_readonly(&mem2, MM_RODATA_START + 8), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -804,32 +804,32 @@ mod tests { assert_matches!( iter_memory_pair_chunks( AccessType::Load, - MM_PROGRAM_START, + MM_RODATA_START, AccessType::Load, - MM_PROGRAM_START + 8, + MM_RODATA_START + 8, 8, &[], &memory_mapping, false, |_src, _dst, _len| Ok::<_, Error>(0), ).unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 8, "program") if *addr == MM_PROGRAM_START + 8 + EbpfError::AccessViolation(AccessType::Load, addr, 8, "program") if *addr == MM_RODATA_START + 8 ); // src is shorter than dst assert_matches!( iter_memory_pair_chunks( AccessType::Load, - MM_PROGRAM_START + 10, + MM_RODATA_START + 10, AccessType::Load, - MM_PROGRAM_START + 2, + MM_RODATA_START + 2, 3, &[], &memory_mapping, false, |_src, _dst, _len| Ok::<_, Error>(0), ).unwrap_err().downcast_ref().unwrap(), - EbpfError::AccessViolation(AccessType::Load, addr, 3, "program") if *addr == MM_PROGRAM_START + 10 + EbpfError::AccessViolation(AccessType::Load, addr, 3, "program") if *addr == MM_RODATA_START + 10 ); } @@ -844,17 +844,17 @@ mod tests { let mem2 = vec![0x22; 4]; let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_readonly(&mem2, MM_PROGRAM_START + 8), + MemoryRegion::new_readonly(&mem1, MM_RODATA_START), + MemoryRegion::new_readonly(&mem2, MM_RODATA_START + 8), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); memmove_non_contiguous( - MM_PROGRAM_START, - MM_PROGRAM_START + 8, + MM_RODATA_START, + MM_RODATA_START + 8, 4, &[], &memory_mapping, @@ -903,8 +903,8 @@ mod tests { // do our memmove memmove_non_contiguous( - MM_PROGRAM_START + dst_offset as u64, - MM_PROGRAM_START + src_offset as u64, + MM_RODATA_START + dst_offset as u64, + MM_RODATA_START + src_offset as u64, len as u64, &[], &memory_mapping, @@ -929,16 +929,16 @@ mod tests { let mem2 = vec![0x22; 4]; let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_writable(&mut mem1, MM_PROGRAM_START), - MemoryRegion::new_readonly(&mem2, MM_PROGRAM_START + 8), + MemoryRegion::new_writable(&mut mem1, MM_RODATA_START), + MemoryRegion::new_readonly(&mem2, MM_RODATA_START + 8), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_eq!( - memset_non_contiguous(MM_PROGRAM_START, 0x33, 9, &[], &memory_mapping).unwrap(), + memset_non_contiguous(MM_RODATA_START, 0x33, 9, &[], &memory_mapping).unwrap(), 0 ); } @@ -955,18 +955,18 @@ mod tests { let mut mem4 = vec![0x44; 4]; let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_writable(&mut mem2, MM_PROGRAM_START + 1), - MemoryRegion::new_writable(&mut mem3, MM_PROGRAM_START + 3), - MemoryRegion::new_writable(&mut mem4, MM_PROGRAM_START + 6), + MemoryRegion::new_readonly(&mem1, MM_RODATA_START), + MemoryRegion::new_writable(&mut mem2, MM_RODATA_START + 1), + MemoryRegion::new_writable(&mut mem3, MM_RODATA_START + 3), + MemoryRegion::new_writable(&mut mem4, 
MM_RODATA_START + 6), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_eq!( - memset_non_contiguous(MM_PROGRAM_START + 1, 0x55, 7, &[], &memory_mapping).unwrap(), + memset_non_contiguous(MM_RODATA_START + 1, 0x55, 7, &[], &memory_mapping).unwrap(), 0 ); assert_eq!(&mem1, &[0x11]); @@ -986,20 +986,20 @@ mod tests { let mem3 = b"foobarbad".to_vec(); let memory_mapping = MemoryMapping::new( vec![ - MemoryRegion::new_readonly(&mem1, MM_PROGRAM_START), - MemoryRegion::new_readonly(&mem2, MM_PROGRAM_START + 3), - MemoryRegion::new_readonly(&mem3, MM_PROGRAM_START + 9), + MemoryRegion::new_readonly(&mem1, MM_RODATA_START), + MemoryRegion::new_readonly(&mem2, MM_RODATA_START + 3), + MemoryRegion::new_readonly(&mem3, MM_RODATA_START + 9), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); // non contiguous src assert_eq!( memcmp_non_contiguous( - MM_PROGRAM_START, - MM_PROGRAM_START + 9, + MM_RODATA_START, + MM_RODATA_START + 9, 9, &[], &memory_mapping @@ -1011,8 +1011,8 @@ mod tests { // non contiguous dst assert_eq!( memcmp_non_contiguous( - MM_PROGRAM_START + 10, - MM_PROGRAM_START + 1, + MM_RODATA_START + 10, + MM_RODATA_START + 1, 8, &[], &memory_mapping @@ -1024,8 +1024,8 @@ mod tests { // diff assert_eq!( memcmp_non_contiguous( - MM_PROGRAM_START + 1, - MM_PROGRAM_START + 11, + MM_RODATA_START + 1, + MM_RODATA_START + 11, 5, &[], &memory_mapping @@ -1050,12 +1050,12 @@ mod tests { ); regs.push(MemoryRegion::new_writable( &mut mem[i], - MM_PROGRAM_START + offset as u64, + MM_RODATA_START + offset as u64, )); offset += *region_len; } - let memory_mapping = MemoryMapping::new(regs, config, &SBPFVersion::V2).unwrap(); + let memory_mapping = MemoryMapping::new(regs, config, SBPFVersion::V3).unwrap(); (mem, memory_mapping) } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 3438c819c74bbe..c1f60fb67e2894 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -21,21 +21,22 @@ use { solana_feature_set::{ self as feature_set, abort_on_invalid_curve, blake3_syscall_enabled, bpf_account_data_direct_mapping, curve25519_syscall_enabled, - disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, disable_sbpf_v1_execution, + disable_deploy_of_alloc_free_syscall, disable_fees_sysvar, disable_sbpf_v0_execution, enable_alt_bn128_compression_syscall, enable_alt_bn128_syscall, enable_big_mod_exp_syscall, enable_get_epoch_stake_syscall, enable_partitioned_epoch_reward, enable_poseidon_syscall, - get_sysvar_syscall_enabled, last_restart_slot_sysvar, - partitioned_epoch_rewards_superfeature, reenable_sbpf_v1_execution, - remaining_compute_units_syscall_enabled, FeatureSet, + enable_sbpf_v1_deployment_and_execution, enable_sbpf_v2_deployment_and_execution, + enable_sbpf_v3_deployment_and_execution, get_sysvar_syscall_enabled, + last_restart_slot_sysvar, partitioned_epoch_rewards_superfeature, + reenable_sbpf_v0_execution, remaining_compute_units_syscall_enabled, FeatureSet, }, solana_log_collector::{ic_logger_msg, ic_msg}, solana_poseidon as poseidon, solana_program_memory::is_nonoverlapping, solana_program_runtime::{invoke_context::InvokeContext, stable_log}, - solana_rbpf::{ + solana_sbpf::{ declare_builtin_function, memory_region::{AccessType, MemoryMapping}, - program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, vm::Config, }, solana_sdk::{ @@ -232,11 +233,11 @@ fn consume_compute_meter(invoke_context: 
&InvokeContext, amount: u64) -> Result< } macro_rules! register_feature_gated_function { - ($result:expr, $is_feature_active:expr, $name:expr, $call:expr $(,)?) => { + ($result:expr, $is_feature_active:expr, $name:expr, $key:expr, $call:expr $(,)?) => { if $is_feature_active { - $result.register_function_hashed($name, $call) + $result.register_function($name, $key, $call) } else { - Ok(0) + Ok(()) } }; } @@ -244,19 +245,23 @@ macro_rules! register_feature_gated_function { pub fn morph_into_deployment_environment_v1( from: Arc>, ) -> Result, Error> { - let mut config = *from.get_config(); + let mut config = from.get_config().clone(); config.reject_broken_elfs = true; + // Once the tests are being build using a toolchain which supports the newer SBPF versions, + // the deployment of older versions will be disabled: + // config.enabled_sbpf_versions = + // *config.enabled_sbpf_versions.end()..=*config.enabled_sbpf_versions.end(); - let mut result = FunctionRegistry::>::default(); + let mut result = BuiltinProgram::new_loader_with_dense_registration(config); - for (key, (name, value)) in from.get_function_registry().iter() { + for (key, (name, value)) in from.get_function_registry(SBPFVersion::V3).iter() { // Deployment of programs with sol_alloc_free is disabled. So do not register the syscall. if name != *b"sol_alloc_free_" { - result.register_function(key, name, value)?; + result.register_function(unsafe { std::str::from_utf8_unchecked(name) }, key, value)?; } } - Ok(BuiltinProgram::new_loader(config, result)) + Ok(result) } pub fn create_program_runtime_environment_v1<'a>( @@ -284,6 +289,24 @@ pub fn create_program_runtime_environment_v1<'a>( let get_sysvar_syscall_enabled = feature_set.is_active(&get_sysvar_syscall_enabled::id()); let enable_get_epoch_stake_syscall = feature_set.is_active(&enable_get_epoch_stake_syscall::id()); + let min_sbpf_version = if !feature_set.is_active(&disable_sbpf_v0_execution::id()) + || feature_set.is_active(&reenable_sbpf_v0_execution::id()) + { + SBPFVersion::V0 + } else { + SBPFVersion::V3 + }; + let max_sbpf_version = if feature_set.is_active(&enable_sbpf_v3_deployment_and_execution::id()) + { + SBPFVersion::V3 + } else if feature_set.is_active(&enable_sbpf_v2_deployment_and_execution::id()) { + SBPFVersion::V2 + } else if feature_set.is_active(&enable_sbpf_v1_deployment_and_execution::id()) { + SBPFVersion::V1 + } else { + SBPFVersion::V0 + }; + debug_assert!(min_sbpf_version <= max_sbpf_version); let config = Config { max_call_depth: compute_budget.max_call_depth, @@ -297,53 +320,52 @@ pub fn create_program_runtime_environment_v1<'a>( reject_broken_elfs: reject_deployment_of_broken_elfs, noop_instruction_rate: 256, sanitize_user_provided_values: true, - external_internal_function_hash_collision: true, - reject_callx_r10: true, - enable_sbpf_v1: !feature_set.is_active(&disable_sbpf_v1_execution::id()) - || feature_set.is_active(&reenable_sbpf_v1_execution::id()), - enable_sbpf_v2: false, + enabled_sbpf_versions: min_sbpf_version..=max_sbpf_version, optimize_rodata: false, aligned_memory_mapping: !feature_set.is_active(&bpf_account_data_direct_mapping::id()), // Warning, do not use `Config::default()` so that configuration here is explicit. 
}; - let mut result = FunctionRegistry::>::default(); + let mut result = BuiltinProgram::new_loader_with_dense_registration(config); // Abort - result.register_function_hashed(*b"abort", SyscallAbort::vm)?; + result.register_function("abort", 1, SyscallAbort::vm)?; // Panic - result.register_function_hashed(*b"sol_panic_", SyscallPanic::vm)?; + result.register_function("sol_panic_", 2, SyscallPanic::vm)?; // Logging - result.register_function_hashed(*b"sol_log_", SyscallLog::vm)?; - result.register_function_hashed(*b"sol_log_64_", SyscallLogU64::vm)?; - result.register_function_hashed(*b"sol_log_compute_units_", SyscallLogBpfComputeUnits::vm)?; - result.register_function_hashed(*b"sol_log_pubkey", SyscallLogPubkey::vm)?; + result.register_function("sol_log_", 7, SyscallLog::vm)?; + result.register_function("sol_log_64_", 8, SyscallLogU64::vm)?; + result.register_function("sol_log_pubkey", 9, SyscallLogPubkey::vm)?; + result.register_function("sol_log_compute_units_", 10, SyscallLogBpfComputeUnits::vm)?; // Program defined addresses (PDA) - result.register_function_hashed( - *b"sol_create_program_address", + result.register_function( + "sol_create_program_address", + 32, SyscallCreateProgramAddress::vm, )?; - result.register_function_hashed( - *b"sol_try_find_program_address", + result.register_function( + "sol_try_find_program_address", + 33, SyscallTryFindProgramAddress::vm, )?; // Sha256 - result.register_function_hashed(*b"sol_sha256", SyscallHash::vm::)?; + result.register_function("sol_sha256", 17, SyscallHash::vm::)?; // Keccak256 - result.register_function_hashed(*b"sol_keccak256", SyscallHash::vm::)?; + result.register_function("sol_keccak256", 18, SyscallHash::vm::)?; // Secp256k1 Recover - result.register_function_hashed(*b"sol_secp256k1_recover", SyscallSecp256k1Recover::vm)?; + result.register_function("sol_secp256k1_recover", 19, SyscallSecp256k1Recover::vm)?; // Blake3 register_feature_gated_function!( result, blake3_syscall_enabled, - *b"sol_blake3", + "sol_blake3", + 20, SyscallHash::vm::, )?; @@ -351,78 +373,87 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, curve25519_syscall_enabled, - *b"sol_curve_validate_point", + "sol_curve_validate_point", + 24, SyscallCurvePointValidation::vm, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, - *b"sol_curve_group_op", + "sol_curve_group_op", + 25, SyscallCurveGroupOps::vm, )?; register_feature_gated_function!( result, curve25519_syscall_enabled, - *b"sol_curve_multiscalar_mul", + "sol_curve_multiscalar_mul", + 26, SyscallCurveMultiscalarMultiplication::vm, )?; // Sysvars - result.register_function_hashed(*b"sol_get_clock_sysvar", SyscallGetClockSysvar::vm)?; - result.register_function_hashed( - *b"sol_get_epoch_schedule_sysvar", + result.register_function("sol_get_clock_sysvar", 36, SyscallGetClockSysvar::vm)?; + result.register_function( + "sol_get_epoch_schedule_sysvar", + 37, SyscallGetEpochScheduleSysvar::vm, )?; register_feature_gated_function!( result, !disable_fees_sysvar, - *b"sol_get_fees_sysvar", + "sol_get_fees_sysvar", + 40, SyscallGetFeesSysvar::vm, )?; - result.register_function_hashed(*b"sol_get_rent_sysvar", SyscallGetRentSysvar::vm)?; + result.register_function("sol_get_rent_sysvar", 41, SyscallGetRentSysvar::vm)?; register_feature_gated_function!( result, last_restart_slot_syscall_enabled, - *b"sol_get_last_restart_slot", + "sol_get_last_restart_slot", + 38, SyscallGetLastRestartSlotSysvar::vm, )?; register_feature_gated_function!( result, 
epoch_rewards_syscall_enabled, - *b"sol_get_epoch_rewards_sysvar", + "sol_get_epoch_rewards_sysvar", + 39, SyscallGetEpochRewardsSysvar::vm, )?; // Memory ops - result.register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm)?; - result.register_function_hashed(*b"sol_memmove_", SyscallMemmove::vm)?; - result.register_function_hashed(*b"sol_memcmp_", SyscallMemcmp::vm)?; - result.register_function_hashed(*b"sol_memset_", SyscallMemset::vm)?; + result.register_function("sol_memcpy_", 3, SyscallMemcpy::vm)?; + result.register_function("sol_memmove_", 4, SyscallMemmove::vm)?; + result.register_function("sol_memset_", 5, SyscallMemset::vm)?; + result.register_function("sol_memcmp_", 6, SyscallMemcmp::vm)?; // Processed sibling instructions - result.register_function_hashed( - *b"sol_get_processed_sibling_instruction", + result.register_function( + "sol_get_processed_sibling_instruction", + 22, SyscallGetProcessedSiblingInstruction::vm, )?; // Stack height - result.register_function_hashed(*b"sol_get_stack_height", SyscallGetStackHeight::vm)?; + result.register_function("sol_get_stack_height", 23, SyscallGetStackHeight::vm)?; // Return data - result.register_function_hashed(*b"sol_set_return_data", SyscallSetReturnData::vm)?; - result.register_function_hashed(*b"sol_get_return_data", SyscallGetReturnData::vm)?; + result.register_function("sol_set_return_data", 14, SyscallSetReturnData::vm)?; + result.register_function("sol_get_return_data", 15, SyscallGetReturnData::vm)?; // Cross-program invocation - result.register_function_hashed(*b"sol_invoke_signed_c", SyscallInvokeSignedC::vm)?; - result.register_function_hashed(*b"sol_invoke_signed_rust", SyscallInvokeSignedRust::vm)?; + result.register_function("sol_invoke_signed_c", 12, SyscallInvokeSignedC::vm)?; + result.register_function("sol_invoke_signed_rust", 13, SyscallInvokeSignedRust::vm)?; // Memory allocator register_feature_gated_function!( result, !disable_deploy_of_alloc_free_syscall, - *b"sol_alloc_free_", + "sol_alloc_free_", + 11, SyscallAllocFree::vm, )?; @@ -430,7 +461,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_alt_bn128_syscall, - *b"sol_alt_bn128_group_op", + "sol_alt_bn128_group_op", + 28, SyscallAltBn128::vm, )?; @@ -438,7 +470,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_big_mod_exp_syscall, - *b"sol_big_mod_exp", + "sol_big_mod_exp", + 30, SyscallBigModExp::vm, )?; @@ -446,7 +479,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_poseidon_syscall, - *b"sol_poseidon", + "sol_poseidon", + 21, SyscallPoseidon::vm, )?; @@ -454,7 +488,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, remaining_compute_units_syscall_enabled, - *b"sol_remaining_compute_units", + "sol_remaining_compute_units", + 31, SyscallRemainingComputeUnits::vm )?; @@ -462,7 +497,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, enable_alt_bn128_compression_syscall, - *b"sol_alt_bn128_compression", + "sol_alt_bn128_compression", + 29, SyscallAltBn128Compression::vm, )?; @@ -470,7 +506,8 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, get_sysvar_syscall_enabled, - *b"sol_get_sysvar", + "sol_get_sysvar", + 34, SyscallGetSysvar::vm, )?; @@ -478,14 +515,15 @@ pub fn create_program_runtime_environment_v1<'a>( register_feature_gated_function!( result, 
enable_get_epoch_stake_syscall, - *b"sol_get_epoch_stake", + "sol_get_epoch_stake", + 35, SyscallGetEpochStake::vm, )?; // Log data - result.register_function_hashed(*b"sol_log_data", SyscallLogData::vm)?; + result.register_function("sol_log_data", 16, SyscallLogData::vm)?; - Ok(BuiltinProgram::new_loader(config, result)) + Ok(result) } pub fn create_program_runtime_environment_v2<'a>( @@ -504,10 +542,7 @@ pub fn create_program_runtime_environment_v2<'a>( reject_broken_elfs: true, noop_instruction_rate: 256, sanitize_user_provided_values: true, - external_internal_function_hash_collision: true, - reject_callx_r10: true, - enable_sbpf_v1: false, - enable_sbpf_v2: true, + enabled_sbpf_versions: SBPFVersion::Reserved..=SBPFVersion::Reserved, optimize_rodata: true, aligned_memory_mapping: true, // Warning, do not use `Config::default()` so that configuration here is explicit. @@ -1848,7 +1883,7 @@ declare_builtin_function!( let budget = invoke_context.get_compute_budget(); consume_compute_meter(invoke_context, budget.syscall_base_cost)?; - use solana_rbpf::vm::ContextObject; + use solana_sbpf::vm::ContextObject; Ok(invoke_context.get_remaining()) } ); @@ -2120,7 +2155,7 @@ mod tests { assert_matches::assert_matches, core::slice, solana_program_runtime::{invoke_context::InvokeContext, with_mock_invoke_context}, - solana_rbpf::{ + solana_sbpf::{ error::EbpfError, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, }, solana_sdk::{ @@ -2191,7 +2226,7 @@ mod tests { let memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(&data, START)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2232,7 +2267,7 @@ mod tests { let memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(bytes_of(&pubkey), 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let translated_pubkey = @@ -2248,14 +2283,14 @@ mod tests { let instruction = StableInstruction::from(instruction); let memory_region = MemoryRegion::new_readonly(bytes_of(&instruction), 0x100000000); let memory_mapping = - MemoryMapping::new(vec![memory_region], &config, &SBPFVersion::V2).unwrap(); + MemoryMapping::new(vec![memory_region], &config, SBPFVersion::V3).unwrap(); let translated_instruction = translate_type::(&memory_mapping, 0x100000000, true).unwrap(); assert_eq!(instruction, *translated_instruction); let memory_region = MemoryRegion::new_readonly(&bytes_of(&instruction)[..1], 0x100000000); let memory_mapping = - MemoryMapping::new(vec![memory_region], &config, &SBPFVersion::V2).unwrap(); + MemoryMapping::new(vec![memory_region], &config, SBPFVersion::V3).unwrap(); assert!(translate_type::(&memory_mapping, 0x100000000, true).is_err()); } @@ -2270,7 +2305,7 @@ mod tests { let memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(&good_data, 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let translated_data = @@ -2283,7 +2318,7 @@ mod tests { let memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(&data, 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let translated_data = @@ -2308,7 +2343,7 @@ mod tests { 0x100000000, )], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let translated_data = @@ -2328,7 +2363,7 @@ mod tests { 0x100000000, )], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let translated_data = @@ -2346,7 +2381,7 @@ mod tests { let memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(string.as_bytes(), 0x100000000)], 
&config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); assert_eq!( @@ -2370,7 +2405,7 @@ mod tests { fn test_syscall_abort() { prepare_mockup!(invoke_context, program_id, bpf_loader::id()); let config = Config::default(); - let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); + let mut memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); let result = SyscallAbort::rust(&mut invoke_context, 0, 0, 0, 0, 0, &mut memory_mapping); result.unwrap(); } @@ -2385,7 +2420,7 @@ mod tests { let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(string.as_bytes(), 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2426,7 +2461,7 @@ mod tests { let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(string.as_bytes(), 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2493,7 +2528,7 @@ mod tests { invoke_context.mock_set_remaining(cost); let config = Config::default(); - let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); + let mut memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); let result = SyscallLogU64::rust(&mut invoke_context, 1, 2, 3, 4, 5, &mut memory_mapping); result.unwrap(); @@ -2517,7 +2552,7 @@ mod tests { let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_readonly(bytes_of(&pubkey), 0x100000000)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2700,7 +2735,7 @@ mod tests { MemoryRegion::new_readonly(bytes2.as_bytes(), bytes_to_hash[1].vm_addr), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2798,7 +2833,7 @@ mod tests { MemoryRegion::new_readonly(&invalid_bytes, invalid_bytes_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2871,7 +2906,7 @@ mod tests { MemoryRegion::new_readonly(&invalid_bytes, invalid_bytes_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -2958,7 +2993,7 @@ mod tests { MemoryRegion::new_writable(bytes_of_slice_mut(&mut result_point), result_point_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3113,7 +3148,7 @@ mod tests { MemoryRegion::new_writable(bytes_of_slice_mut(&mut result_point), result_point_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3283,7 +3318,7 @@ mod tests { MemoryRegion::new_writable(bytes_of_slice_mut(&mut result_point), result_point_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3376,7 +3411,7 @@ mod tests { MemoryRegion::new_writable(bytes_of_slice_mut(&mut result_point), result_point_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3561,7 +3596,7 @@ mod tests { MemoryRegion::new_readonly(&Clock::id().to_bytes(), clock_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3627,7 +3662,7 @@ mod tests { ), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3689,7 +3724,7 @@ mod tests { got_fees_va, )], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3728,7 +3763,7 @@ mod tests { MemoryRegion::new_readonly(&Rent::id().to_bytes(), rent_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3788,7 +3823,7 @@ mod tests { MemoryRegion::new_readonly(&EpochRewards::id().to_bytes(), rewards_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3853,7 +3888,7 @@ mod tests { 
MemoryRegion::new_readonly(&LastRestartSlot::id().to_bytes(), restart_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3938,7 +3973,7 @@ mod tests { MemoryRegion::new_readonly(&StakeHistory::id().to_bytes(), history_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -3997,7 +4032,7 @@ mod tests { MemoryRegion::new_readonly(&SlotHashes::id().to_bytes(), hashes_id_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4043,7 +4078,7 @@ mod tests { MemoryRegion::new_readonly(&got_clock_buf_ro, got_clock_buf_ro_va), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4232,7 +4267,7 @@ mod tests { bytes_of_slice(&mock_slices), SEEDS_VA, )); - let mut memory_mapping = MemoryMapping::new(regions, &config, &SBPFVersion::V2).unwrap(); + let mut memory_mapping = MemoryMapping::new(regions, &config, SBPFVersion::V3).unwrap(); let result = syscall( invoke_context, @@ -4296,7 +4331,7 @@ mod tests { MemoryRegion::new_writable(&mut id_buffer, PROGRAM_ID_VA), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4396,7 +4431,7 @@ mod tests { let mut memory_mapping = MemoryMapping::new( vec![MemoryRegion::new_writable(&mut memory, VM_BASE_ADDRESS)], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); let processed_sibling_instruction = translate_type_mut::<ProcessedSiblingInstruction>( @@ -4702,7 +4737,7 @@ mod tests { MemoryRegion::new_writable(&mut data_out, VADDR_OUT), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4744,7 +4779,7 @@ mod tests { MemoryRegion::new_writable(&mut data_out, VADDR_OUT), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4799,7 +4834,7 @@ mod tests { let null_pointer_var = std::ptr::null::<Pubkey>() as u64; - let mut memory_mapping = MemoryMapping::new(vec![], &config, &SBPFVersion::V2).unwrap(); + let mut memory_mapping = MemoryMapping::new(vec![], &config, SBPFVersion::V3).unwrap(); let result = SyscallGetEpochStake::rust( &mut invoke_context, @@ -4863,7 +4898,7 @@ mod tests { MemoryRegion::new_readonly(&[2; 31], vote_address_var), ], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4893,7 +4928,7 @@ mod tests { vote_address_var, )], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); @@ -4925,7 +4960,7 @@ mod tests { vote_address_var, )], &config, - &SBPFVersion::V2, + SBPFVersion::V3, ) .unwrap(); diff --git a/programs/bpf_loader/test_elfs/out/callx-r10-sbfv1.so b/programs/bpf_loader/test_elfs/out/sbpfv0_verifier_err.so similarity index 100% rename from programs/bpf_loader/test_elfs/out/callx-r10-sbfv1.so rename to programs/bpf_loader/test_elfs/out/sbpfv0_verifier_err.so diff --git a/programs/bpf_loader/test_elfs/out/sbpfv3_return_err.so b/programs/bpf_loader/test_elfs/out/sbpfv3_return_err.so new file mode 100644 index 00000000000000..fac0efc9da6945 Binary files /dev/null and b/programs/bpf_loader/test_elfs/out/sbpfv3_return_err.so differ diff --git a/programs/bpf_loader/test_elfs/out/sbpfv3_return_ok.so b/programs/bpf_loader/test_elfs/out/sbpfv3_return_ok.so new file mode 100644 index 00000000000000..e3cd32871d39ce Binary files /dev/null and b/programs/bpf_loader/test_elfs/out/sbpfv3_return_ok.so differ diff --git a/programs/compute-budget-bench/Cargo.toml b/programs/compute-budget-bench/Cargo.toml index e9071e2afa6e81..f4b006ff78739c 100644 --- a/programs/compute-budget-bench/Cargo.toml +++ b/programs/compute-budget-bench/Cargo.toml @@ -12,8 +12,11 @@ edition = { workspace = true } criterion = { workspace = true } solana-compute-budget = 
{ workspace = true } solana-compute-budget-instruction = { workspace = true } +solana-compute-budget-interface = { workspace = true } solana-compute-budget-program = { workspace = true } -solana-sdk = { workspace = true } +solana-feature-set = { workspace = true } +solana-message = { workspace = true } +solana-sdk-ids = { workspace = true } solana-svm-transaction = { workspace = true } [[bench]] diff --git a/programs/compute-budget-bench/benches/compute_budget.rs b/programs/compute-budget-bench/benches/compute_budget.rs index 6b2f2b92a9332e..ebc7291ebc6f7f 100644 --- a/programs/compute-budget-bench/benches/compute_budget.rs +++ b/programs/compute-budget-bench/benches/compute_budget.rs @@ -2,10 +2,9 @@ use { criterion::{black_box, criterion_group, criterion_main, Criterion}, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, - solana_sdk::{ - compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, - instruction::CompiledInstruction, - }, + solana_compute_budget_interface::ComputeBudgetInstruction, + solana_feature_set::FeatureSet, + solana_message::compiled_instruction::CompiledInstruction, solana_svm_transaction::instruction::SVMInstruction, std::num::NonZero, }; @@ -15,7 +14,7 @@ const SIXTY_FOUR_MB: u32 = 64 * 1024 * 1024; fn bench_request_heap_frame(c: &mut Criterion) { let instruction = [( - solana_sdk::compute_budget::id(), + solana_sdk_ids::compute_budget::id(), CompiledInstruction::new_from_raw_parts( 0, ComputeBudgetInstruction::request_heap_frame(ONE_PAGE).data, @@ -48,7 +47,7 @@ fn bench_request_heap_frame(c: &mut Criterion) { fn bench_set_compute_unit_limit(c: &mut Criterion) { let instruction = [( - solana_sdk::compute_budget::id(), + solana_sdk_ids::compute_budget::id(), CompiledInstruction::new_from_raw_parts( 0, ComputeBudgetInstruction::set_compute_unit_limit(1024).data, @@ -81,7 +80,7 @@ fn bench_set_compute_unit_limit(c: &mut Criterion) { fn bench_set_compute_unit_price(c: &mut Criterion) { let instruction = [( - solana_sdk::compute_budget::id(), + solana_sdk_ids::compute_budget::id(), CompiledInstruction::new_from_raw_parts( 0, ComputeBudgetInstruction::set_compute_unit_price(1).data, @@ -114,7 +113,7 @@ fn bench_set_compute_unit_price(c: &mut Criterion) { fn bench_set_loaded_accounts_data_size_limit(c: &mut Criterion) { let instruction = [( - solana_sdk::compute_budget::id(), + solana_sdk_ids::compute_budget::id(), CompiledInstruction::new_from_raw_parts( 0, ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(1).data, diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index 55e6e702f3e163..ceb3eb2ef4ff47 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -15,9 +15,9 @@ solana-compute-budget = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true } solana-type-overrides = { workspace = true } -solana_rbpf = { workspace = true } [dev-dependencies] bincode = { workspace = true } @@ -30,4 +30,4 @@ name = "solana_loader_v4_program" targets = ["x86_64-unknown-linux-gnu"] [features] -shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana_rbpf/shuttle-test"] +shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana-sbpf/shuttle-test"] diff --git 
a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 78033323e2a1e7..b209f54dbbbe07 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -6,7 +6,7 @@ use { invoke_context::InvokeContext, loaded_programs::{ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType}, }, - solana_rbpf::{declare_builtin_function, memory_region::MemoryMapping}, + solana_sbpf::{declare_builtin_function, memory_region::MemoryMapping}, solana_sdk::{ instruction::InstructionError, loader_v4::{self, LoaderV4State, LoaderV4Status, DEPLOYMENT_COOLDOWN_IN_SLOTS}, @@ -572,7 +572,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -584,7 +584,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -670,7 +670,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -682,7 +682,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -767,7 +767,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -780,14 +780,14 @@ mod tests { ), ( Pubkey::new_unique(), - AccountSharedData::new(40000000, 0, &loader_v4::id()), + AccountSharedData::new(0, 0, &loader_v4::id()), ), ( Pubkey::new_unique(), load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_ok", ), ), ( @@ -795,7 +795,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -807,6 +807,9 @@ mod tests { create_account_shared_data_for_test(&rent::Rent::default()), ), ]; + let smaller_program_lamports = transaction_accounts[0].1.lamports(); + let larger_program_lamports = transaction_accounts[4].1.lamports(); + assert_ne!(smaller_program_lamports, larger_program_lamports); // No change let accounts = process_instruction( @@ -829,10 +832,11 @@ mod tests { transaction_accounts[0].1.data().len(), ); assert_eq!(accounts[2].lamports(), transaction_accounts[2].1.lamports()); - let lamports = transaction_accounts[4].1.lamports(); - transaction_accounts[0].1.set_lamports(lamports); // Initialize program account + transaction_accounts[3] + .1 + .set_lamports(smaller_program_lamports); let accounts = process_instruction( vec![], &bincode::serialize(&LoaderV4Instruction::Truncate { @@ -854,6 +858,9 @@ mod tests { ); // Increase program account size + transaction_accounts[0] + .1 + .set_lamports(larger_program_lamports); let accounts = process_instruction( vec![], &bincode::serialize(&LoaderV4Instruction::Truncate { @@ -1024,7 +1031,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_ok", ), ), ( @@ -1036,7 +1043,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_unaligned", + "sbpfv3_return_err", ), ), ( @@ -1048,7 +1055,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "callx-r10-sbfv1", + "sbpfv0_verifier_err", ), ), (clock::id(), clock(1000)), @@ -1172,7 +1179,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - 
"noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1188,7 +1195,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_err", ), ), (clock::id(), clock(1000)), @@ -1252,7 +1259,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1260,7 +1267,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1347,7 +1354,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Deployed, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1355,7 +1362,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1363,7 +1370,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1371,7 +1378,7 @@ mod tests { load_program_account_from_elf( Pubkey::new_unique(), LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_err", ), ), ( @@ -1487,7 +1494,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "noop_aligned", + "sbpfv3_return_ok", ), ), ( @@ -1503,7 +1510,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Retracted, - "noop_aligned", + "sbpfv3_return_ok", ), ), ( @@ -1511,7 +1518,7 @@ mod tests { load_program_account_from_elf( authority_address, LoaderV4Status::Finalized, - "callx-r10-sbfv1", + "sbpfv0_verifier_err", ), ), ]; diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index cd9fae063dfaf9..45e2e593aab310 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -70,7 +70,7 @@ dependencies = [ "log", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -142,7 +142,7 @@ dependencies = [ "solana-version", "solana-vote-program", "symlink", - "thiserror 2.0.6", + "thiserror 2.0.9", "tikv-jemallocator", "tokio", ] @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "aquamarine" @@ -850,18 +850,18 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", @@ -1066,15 +1066,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" 
dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width 0.1.8", - "windows-sys 0.52.0", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", ] [[package]] @@ -1157,9 +1157,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] @@ -1572,9 +1572,9 @@ dependencies = [ [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" @@ -2232,9 +2232,9 @@ checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -2821,9 +2821,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -3978,9 +3978,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -3996,11 +3996,11 @@ dependencies = [ "rand 0.8.5", "ring 0.17.3", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.6", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -4409,9 +4409,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.3", @@ -4473,7 +4473,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -4546,12 +4546,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "scroll" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" - [[package]] name = "sct" version = "0.7.0" @@ -4588,9 +4582,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = 
"3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "seqlock" @@ -4603,9 +4597,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -4630,9 +4624,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", @@ -4928,7 +4922,7 @@ dependencies = [ "spl-token-2022", "spl-token-group-interface", "spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -4998,7 +4992,7 @@ dependencies = [ "static_assertions", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5015,7 +5009,7 @@ dependencies = [ "solana-program", "solana-program-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5035,7 +5029,7 @@ dependencies = [ "solana-program", "solana-sdk", "tarpc", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-serde", ] @@ -5103,7 +5097,7 @@ dependencies = [ "ark-serialize", "bytemuck", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5132,11 +5126,11 @@ dependencies = [ "solana-poseidon", "solana-program-memory", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-timings", "solana-type-overrides", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5218,7 +5212,7 @@ dependencies = [ "solana-seed-phrase", "solana-signature", "solana-signer", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "uriparse", "url 2.5.4", @@ -5303,7 +5297,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-udp-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5372,7 +5366,7 @@ dependencies = [ "solana-pubkey", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5425,7 +5419,7 @@ dependencies = [ "solana-metrics", "solana-time-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5459,7 +5453,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.19", + "rustls 0.23.20", "serde", "serde_bytes", "serde_derive", @@ -5512,7 +5506,7 @@ dependencies = [ "sys-info", "sysctl", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "trees", ] @@ -5555,7 +5549,7 @@ dependencies = [ "bytemuck_derive", "curve25519-dalek 4.1.3", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5694,7 +5688,7 @@ dependencies = [ "solana-transaction", "solana-version", "spl-memo", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5795,7 +5789,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5818,8 +5812,10 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "serde", + "serde-big-array", "serde_bytes", "serde_derive", + "siphasher", "solana-bloom", "solana-clap-utils", "solana-client", @@ -5845,7 +5841,7 @@ dependencies = [ "solana-vote", "solana-vote-program", 
"static_assertions", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6034,7 +6030,7 @@ dependencies = [ "strum_macros", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "trees", @@ -6050,9 +6046,9 @@ dependencies = [ "solana-log-collector", "solana-measure", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-type-overrides", - "solana_rbpf", ] [[package]] @@ -6118,7 +6114,7 @@ dependencies = [ "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6246,7 +6242,7 @@ dependencies = [ "solana-metrics", "solana-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6264,7 +6260,7 @@ dependencies = [ "ark-bn254", "light-poseidon", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6364,7 +6360,7 @@ dependencies = [ "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "thiserror 2.0.6", + "thiserror 2.0.9", "wasm-bindgen", ] @@ -6419,7 +6415,6 @@ dependencies = [ "bincode", "enum-iterator", "itertools 0.12.1", - "libc", "log", "num-derive", "num-traits", @@ -6441,6 +6436,7 @@ dependencies = [ "solana-precompiles", "solana-pubkey", "solana-rent", + "solana-sbpf", "solana-sdk-ids", "solana-slot-hashes", "solana-stable-layout", @@ -6449,8 +6445,7 @@ dependencies = [ "solana-timings", "solana-transaction-context", "solana-type-overrides", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6478,12 +6473,12 @@ dependencies = [ "solana-logger", "solana-program-runtime", "solana-runtime", + "solana-sbpf", "solana-sdk", "solana-svm", "solana-timings", "solana-vote-program", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6529,7 +6524,7 @@ dependencies = [ "solana-pubkey", "solana-rpc-client-api", "solana-signature", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-tungstenite", @@ -6549,7 +6544,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.19", + "rustls 0.23.20", "solana-connection-cache", "solana-keypair", "solana-measure", @@ -6562,7 +6557,7 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6599,7 +6594,7 @@ dependencies = [ "solana-pubkey", "solana-signature", "solana-signer", - "thiserror 2.0.6", + "thiserror 2.0.9", "uriparse", ] @@ -6694,7 +6689,7 @@ dependencies = [ "spl-token", "spl-token-2022", "stream-cancel", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.1", ] @@ -6761,7 +6756,7 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "solana-version", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6776,7 +6771,7 @@ dependencies = [ "solana-pubkey", "solana-rpc-client", "solana-sdk-ids", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6857,7 +6852,7 @@ dependencies = [ "symlink", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -6873,7 +6868,7 @@ dependencies = [ "solana-sdk", "solana-sdk-ids", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6912,6 +6907,7 @@ dependencies = [ "solana-sbf-rust-invoke-dep", "solana-sbf-rust-realloc-dep", "solana-sbf-rust-realloc-invoke-dep", + "solana-sbpf", "solana-sdk", "solana-svm", "solana-svm-transaction", @@ -6920,7 +6916,6 @@ dependencies = [ "solana-type-overrides", "solana-vote", "solana-vote-program", - 
"solana_rbpf", ] [[package]] @@ -7379,6 +7374,23 @@ dependencies = [ "solana-program", ] +[[package]] +name = "solana-sbpf" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b4c060a707fdb0754a876cbbf49591b60a573b5521b485125d2a4d6ff68ce3" +dependencies = [ + "byteorder 1.5.0", + "combine 3.8.1", + "hash32", + "libc", + "log", + "rand 0.8.5", + "rustc-demangle", + "thiserror 1.0.69", + "winapi 0.3.9", +] + [[package]] name = "solana-sdk" version = "2.2.0" @@ -7455,6 +7467,7 @@ dependencies = [ "solana-serde", "solana-serde-varint", "solana-short-vec", + "solana-shred-version", "solana-signature", "solana-signer", "solana-system-transaction", @@ -7462,7 +7475,8 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.6", + "solana-validator-exit", + "thiserror 2.0.9", "wasm-bindgen", ] @@ -7506,7 +7520,7 @@ dependencies = [ "borsh 1.5.3", "libsecp256k1 0.6.0", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7602,6 +7616,16 @@ dependencies = [ "serde", ] +[[package]] +name = "solana-shred-version" +version = "2.2.0" +dependencies = [ + "byteorder 1.5.0", + "solana-hard-forks", + "solana-hash", + "solana-sha256-hasher", +] + [[package]] name = "solana-signature" version = "2.2.0" @@ -7695,7 +7719,7 @@ dependencies = [ "solana-sdk", "solana-storage-proto", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tonic", "zstd", @@ -7738,7 +7762,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.19", + "rustls 0.23.20", "smallvec", "socket2 0.5.8", "solana-keypair", @@ -7755,7 +7779,7 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "solana-transaction-metrics-tracker", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.1", "x509-parser", @@ -7772,22 +7796,38 @@ dependencies = [ "qualifier_attr", "serde", "serde_derive", + "solana-account", "solana-bpf-loader-program", + "solana-clock", "solana-compute-budget", "solana-compute-budget-instruction", "solana-feature-set", "solana-fee", + "solana-fee-structure", + "solana-hash", + "solana-instruction", + "solana-instructions-sysvar", "solana-loader-v4-program", "solana-log-collector", "solana-measure", + "solana-message", + "solana-nonce", + "solana-precompiles", + "solana-program", "solana-program-runtime", + "solana-pubkey", + "solana-rent", + "solana-rent-debits", "solana-sdk", + "solana-sdk-ids", "solana-svm-rent-collector", "solana-svm-transaction", "solana-system-program", "solana-timings", + "solana-transaction-context", + "solana-transaction-error", "solana-type-overrides", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7978,7 +8018,7 @@ dependencies = [ name = "solana-tls-utils" version = "2.2.0" dependencies = [ - "rustls 0.23.19", + "rustls 0.23.20", "solana-keypair", "solana-pubkey", "solana-signer", @@ -8013,7 +8053,7 @@ dependencies = [ "solana-signer", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8025,7 +8065,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.19", + "rustls 0.23.20", "solana-clock", "solana-connection-cache", "solana-keypair", @@ -8037,7 +8077,7 @@ dependencies = [ "solana-time-utils", "solana-tls-utils", "solana-tpu-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.1", ] @@ -8130,7 +8170,7 @@ dependencies = [ "spl-token-2022", "spl-token-group-interface", 
"spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8151,7 +8191,7 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8170,7 +8210,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.19", + "rustls 0.23.20", "solana-entry", "solana-feature-set", "solana-geyser-plugin-manager", @@ -8190,7 +8230,7 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "static_assertions", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8212,7 +8252,7 @@ dependencies = [ "solana-net-utils", "solana-streamer", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -8247,6 +8287,10 @@ dependencies = [ "vec_extract_if_polyfill", ] +[[package]] +name = "solana-validator-exit" +version = "2.2.0" + [[package]] name = "solana-version" version = "2.2.0" @@ -8269,7 +8313,7 @@ dependencies = [ "serde_derive", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8287,7 +8331,7 @@ dependencies = [ "solana-program", "solana-program-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -8318,9 +8362,10 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-sdk", ] @@ -8354,7 +8399,7 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "wasm-bindgen", "zeroize", ] @@ -8367,9 +8412,10 @@ dependencies = [ "num-derive", "num-traits", "solana-feature-set", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-token-sdk", ] @@ -8404,28 +8450,10 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "zeroize", ] -[[package]] -name = "solana_rbpf" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" -dependencies = [ - "byteorder 1.5.0", - "combine 3.8.1", - "hash32", - "libc", - "log", - "rand 0.8.5", - "rustc-demangle", - "scroll", - "thiserror 1.0.69", - "winapi 0.3.9", -] - [[package]] name = "spin" version = "0.5.2" @@ -9033,11 +9061,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -9053,9 +9081,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 63a1bd5e9d4c8b..90be3500204a5f 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -59,6 +59,7 @@ solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = 
"=2.2.0" } solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=2.2.0" } solana-sdk = { path = "../../sdk", version = "=2.2.0" } +solana-sbpf = "=0.9.0" solana-secp256k1-recover = { path = "../../curves/secp256k1-recover", version = "=2.2.0" } solana-svm = { path = "../../svm", version = "=2.2.0" } solana-svm-transaction = { path = "../../svm-transaction", version = "=2.2.0" } @@ -69,7 +70,6 @@ solana-vote = { path = "../../vote", version = "=2.2.0" } solana-vote-program = { path = "../../programs/vote", version = "=2.2.0" } agave-validator = { path = "../../validator", version = "=2.2.0" } solana-zk-sdk = { path = "../../zk-sdk", version = "=2.2.0" } -solana_rbpf = "=0.8.5" thiserror = "1.0" [package] @@ -130,6 +130,7 @@ solana-runtime-transaction = { workspace = true, features = [ solana-sbf-rust-invoke-dep = { workspace = true } solana-sbf-rust-realloc-dep = { workspace = true } solana-sbf-rust-realloc-invoke-dep = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-svm = { workspace = true } solana-svm-transaction = { workspace = true } @@ -138,7 +139,6 @@ solana-transaction-status = { workspace = true } solana-type-overrides = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } -solana_rbpf = { workspace = true } [[bench]] name = "bpf_loader" diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 7475e9ea2d0f53..c31a2d64f04c06 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -8,7 +8,7 @@ )] use { - solana_feature_set::bpf_account_data_direct_mapping, solana_rbpf::memory_region::MemoryState, + solana_feature_set::bpf_account_data_direct_mapping, solana_sbpf::memory_region::MemoryState, solana_sdk::signer::keypair::Keypair, std::slice, }; @@ -24,16 +24,16 @@ use { solana_feature_set::FeatureSet, solana_measure::measure::Measure, solana_program_runtime::invoke_context::InvokeContext, - solana_rbpf::{ - ebpf::MM_INPUT_START, elf::Executable, memory_region::MemoryRegion, - verifier::RequisiteVerifier, vm::ContextObject, - }, solana_runtime::{ bank::Bank, bank_client::BankClient, genesis_utils::{create_genesis_config, GenesisConfigInfo}, loader_utils::{load_program_from_file, load_upgradeable_program_and_advance_slot}, }, + solana_sbpf::{ + ebpf::MM_INPUT_START, elf::Executable, memory_region::MemoryRegion, + verifier::RequisiteVerifier, vm::ContextObject, + }, solana_sdk::{ account::AccountSharedData, bpf_loader, diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 028d7c87aaf44f..5a7877eeaa2d06 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -19,7 +19,7 @@ use { solana_feature_set::{self as feature_set, FeatureSet}, solana_ledger::token_balances::collect_token_balances, solana_program_runtime::{ - invoke_context::mock_process_instruction, solana_rbpf::vm::ContextObject, + invoke_context::mock_process_instruction, solana_sbpf::vm::ContextObject, }, solana_runtime::{ bank::{Bank, TransactionBalancesSet}, @@ -4642,13 +4642,13 @@ fn test_deplete_cost_meter_with_access_violation() { .. 
} = create_genesis_config(100_123_456_789); - for apply_cost_tracker in [false, true] { + for deplete_cu_meter_on_vm_failure in [false, true] { let mut bank = Bank::new_for_tests(&genesis_config); let feature_set = Arc::make_mut(&mut bank.feature_set); // by default test banks have all features enabled, so we only need to // disable when needed - if !apply_cost_tracker { - feature_set.deactivate(&feature_set::apply_cost_tracker_during_replay::id()); + if !deplete_cu_meter_on_vm_failure { + feature_set.deactivate(&feature_set::deplete_cu_meter_on_vm_failure::id()); } let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); @@ -4696,7 +4696,7 @@ fn test_deplete_cost_meter_with_access_violation() { TransactionError::InstructionError(1, InstructionError::ReadonlyDataModified) ); - if apply_cost_tracker { + if deplete_cu_meter_on_vm_failure { assert_eq!(result.executed_units, u64::from(compute_unit_limit)); } else { assert!(result.executed_units < u64::from(compute_unit_limit)); @@ -4715,13 +4715,13 @@ fn test_program_sbf_deplete_cost_meter_with_divide_by_zero() { .. } = create_genesis_config(50); - for apply_cost_tracker in [false, true] { + for deplete_cu_meter_on_vm_failure in [false, true] { let mut bank = Bank::new_for_tests(&genesis_config); let feature_set = Arc::make_mut(&mut bank.feature_set); // by default test banks have all features enabled, so we only need to // disable when needed - if !apply_cost_tracker { - feature_set.deactivate(&feature_set::apply_cost_tracker_during_replay::id()); + if !deplete_cu_meter_on_vm_failure { + feature_set.deactivate(&feature_set::deplete_cu_meter_on_vm_failure::id()); } let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); @@ -4752,7 +4752,7 @@ fn test_program_sbf_deplete_cost_meter_with_divide_by_zero() { TransactionError::InstructionError(1, InstructionError::ProgramFailedToComplete) ); - if apply_cost_tracker { + if deplete_cu_meter_on_vm_failure { assert_eq!(result.executed_units, u64::from(compute_unit_limit)); } else { assert!(result.executed_units < u64::from(compute_unit_limit)); diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 5d5dbd442afa3c..8e29f3b3e75919 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -22,6 +22,7 @@ solana-vote-program = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } +criterion = { workspace = true } proptest = { workspace = true } solana-compute-budget = { workspace = true } solana-logger = { workspace = true } @@ -34,5 +35,9 @@ name = "solana_stake_program" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[[bench]] +name = "stake" +harness = false + [lints] workspace = true diff --git a/programs/stake/benches/stake.rs b/programs/stake/benches/stake.rs new file mode 100644 index 00000000000000..581d73f100ff3b --- /dev/null +++ b/programs/stake/benches/stake.rs @@ -0,0 +1,759 @@ +use { + bincode::serialize, + criterion::{black_box, criterion_group, criterion_main, Criterion}, + solana_feature_set::FeatureSet, + solana_program_runtime::invoke_context::mock_process_instruction, + solana_sdk::{ + account::{create_account_shared_data_for_test, AccountSharedData, WritableAccount}, + clock::{Clock, Epoch}, + instruction::AccountMeta, + pubkey::Pubkey, + stake::{ + instruction::{ + self, AuthorizeCheckedWithSeedArgs, AuthorizeWithSeedArgs, LockupArgs, + LockupCheckedArgs, StakeInstruction, + }, 
+ stake_flags::StakeFlags, + state::{Authorized, Lockup, StakeAuthorize, StakeStateV2}, + }, + stake_history::StakeHistory, + sysvar::{ + clock, + rent::{self, Rent}, + stake_history, + }, + }, + solana_stake_program::{ + stake_instruction, + stake_state::{Delegation, Meta, Stake}, + }, + solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, + std::sync::Arc, +}; + +const ACCOUNT_BALANCE: u64 = u64::MAX / 4; // enough lamports for tests + +struct TestSetup { + feature_set: Arc<FeatureSet>, + stake_address: Pubkey, + stake_account: AccountSharedData, + transaction_accounts: Vec<(Pubkey, AccountSharedData)>, + instruction_accounts: Vec<AccountMeta>, +} + +impl TestSetup { + fn new() -> Self { + let stake_account = AccountSharedData::new( + ACCOUNT_BALANCE, + StakeStateV2::size_of(), + &solana_stake_program::id(), + ); + let stake_address = solana_sdk::pubkey::Pubkey::new_unique(); + Self { + // some stake instructions are behind feature gates; enable all + // feature gates to bench all instructions + feature_set: Arc::new(FeatureSet::all_enabled()), + stake_address, + stake_account: stake_account.clone(), + transaction_accounts: vec![(stake_address, stake_account)], + instruction_accounts: vec![AccountMeta { + pubkey: stake_address, + is_signer: false, + is_writable: true, + }], + } + } + + fn add_account(&mut self, id: Pubkey, account: AccountSharedData) { + self.transaction_accounts.push((id, account)); + self.instruction_accounts.push(AccountMeta { + pubkey: id, + is_signer: false, + is_writable: true, + }); + } + + fn add_account_signer(&mut self, id: Pubkey, account: AccountSharedData) { + self.transaction_accounts.push((id, account)); + self.instruction_accounts.push(AccountMeta { + pubkey: id, + is_signer: true, + is_writable: true, + }); + } + + fn initialize_stake_account(&mut self) { + let initialized_stake_account = AccountSharedData::new_data_with_space( + ACCOUNT_BALANCE, + &StakeStateV2::Initialized(Meta::auto(&self.stake_address)), + StakeStateV2::size_of(), + &solana_stake_program::id(), + ) + .unwrap(); + + self.stake_account = initialized_stake_account.clone(); + self.transaction_accounts[0] = (self.stake_address, initialized_stake_account); + // also make stake address a signer + self.instruction_accounts[0] = AccountMeta { + pubkey: self.stake_address, + is_signer: true, + is_writable: true, + }; + } + + fn initialize_stake_account_with_seed(&mut self, seed: &str, authorized_owner: &Pubkey) { + self.stake_address = + Pubkey::create_with_seed(authorized_owner, seed, authorized_owner).unwrap(); + self.initialize_stake_account(); + } + + // configure the withdraw authority; returns the authorized withdrawer's pubkey + fn config_withdraw_authority(&mut self) -> Pubkey { + let withdraw_authority_address = Pubkey::new_unique(); + + let instruction = instruction::authorize( + &self.stake_address, + &self.stake_address, + &withdraw_authority_address, + StakeAuthorize::Withdrawer, + None, + ); + + let transaction_accounts = vec![ + (self.stake_address, self.stake_account.clone()), + ( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ), + (withdraw_authority_address, AccountSharedData::default()), + ]; + + let accounts = mock_process_instruction( + &solana_stake_program::id(), + Vec::new(), + &instruction.data, + transaction_accounts, + instruction.accounts.clone(), + Ok(()), + stake_instruction::Entrypoint::vm, + |invoke_context| { + invoke_context.mock_set_feature_set(Arc::clone(&self.feature_set)); + }, + |_invoke_context| {}, + ); + // update stake account +
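// (the authorize instruction above rewrote the stake account's state, so the cached copy must be refreshed before later benches reuse it) +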
self.transaction_accounts[0] = (self.stake_address, accounts[0].clone()); + + withdraw_authority_address + } + + fn delegate_stake(&mut self) { + let vote_address = Pubkey::new_unique(); + + let instruction = + instruction::delegate_stake(&self.stake_address, &self.stake_address, &vote_address); + + let transaction_accounts = vec![ + (self.stake_address, self.stake_account.clone()), + ( + vote_address, + vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100), + ), + ( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ), + ( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ), + ]; + + let accounts = mock_process_instruction( + &solana_stake_program::id(), + Vec::new(), + &instruction.data, + transaction_accounts, + instruction.accounts.clone(), + Ok(()), + stake_instruction::Entrypoint::vm, + |invoke_context| { + invoke_context.mock_set_feature_set(Arc::clone(&self.feature_set)); + }, + |_invoke_context| {}, + ); + self.stake_account = accounts[0].clone(); + self.stake_account.set_lamports(ACCOUNT_BALANCE * 2); + self.transaction_accounts[0] = (self.stake_address, self.stake_account.clone()); + } + + fn run(&self, instruction_data: &[u8]) { + mock_process_instruction( + &solana_stake_program::id(), + Vec::new(), + instruction_data, + self.transaction_accounts.clone(), + self.instruction_accounts.clone(), + Ok(()), //expected_result, + stake_instruction::Entrypoint::vm, + |invoke_context| { + invoke_context.mock_set_feature_set(Arc::clone(&self.feature_set)); + }, + |_invoke_context| {}, + ); + } +} + +fn bench_initialize(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.add_account( + solana_sdk::sysvar::rent::id(), + create_account_shared_data_for_test(&Rent::default()), + ); + + let instruction_data = serialize(&StakeInstruction::Initialize( + Authorized::auto(&test_setup.stake_address), + Lockup::default(), + )) + .unwrap(); + c.bench_function("initialize", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_initialize_checked(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.add_account( + solana_sdk::sysvar::rent::id(), + create_account_shared_data_for_test(&Rent::default()), + ); + // add staker account + test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); + // add withdrawer account + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::InitializeChecked).unwrap(); + + c.bench_function("initialize_checked", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_staker(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + let authority_address = Pubkey::new_unique(); + test_setup.add_account(authority_address, AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::Authorize( + authority_address, + StakeAuthorize::Staker, + )) + .unwrap(); + + c.bench_function("authorize_staker", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_withdrawer(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + 
create_account_shared_data_for_test(&Clock::default()), + ); + // add authority address + let authority_address = Pubkey::new_unique(); + test_setup.add_account(authority_address, AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::Authorize( + authority_address, + StakeAuthorize::Withdrawer, + )) + .unwrap(); + + c.bench_function("authorize_withdrawer", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_staker_with_seed(c: &mut Criterion) { + let seed = "test test"; + let authorize_address = Pubkey::new_unique(); + + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account_with_seed(seed, &authorize_address); + test_setup.add_account_signer(authorize_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + + let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( + AuthorizeWithSeedArgs { + new_authorized_pubkey: Pubkey::new_unique(), + stake_authorize: StakeAuthorize::Staker, + authority_seed: seed.to_string(), + authority_owner: authorize_address, + }, + )) + .unwrap(); + + c.bench_function("authorize_staker_with_seed", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_withdrawer_with_seed(c: &mut Criterion) { + let seed = "test test"; + let authorize_address = Pubkey::new_unique(); + + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account_with_seed(seed, &authorize_address); + test_setup.add_account_signer(authorize_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + + let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( + AuthorizeWithSeedArgs { + new_authorized_pubkey: Pubkey::new_unique(), + stake_authorize: StakeAuthorize::Withdrawer, + authority_seed: seed.to_string(), + authority_owner: authorize_address, + }, + )) + .unwrap(); + + c.bench_function("authorize_withdrawer_with_seed", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_staker_checked(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + // add authorized address as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + // add staker account as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + + let instruction_data = + serialize(&StakeInstruction::AuthorizeChecked(StakeAuthorize::Staker)).unwrap(); + + c.bench_function("authorize_staker_checked", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_withdrawer_checked(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + // add authorized address as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + // add withdrawer account as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::AuthorizeChecked( + StakeAuthorize::Withdrawer, + )) + .unwrap();
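+ // NOTE: each Criterion iteration below re-runs the full harness path: + // `TestSetup::run` clones the transaction and instruction accounts and + // drives the serialized instruction through `mock_process_instruction`, + // so the measurement includes that per-iteration setup as well as the + // stake program's own authorize logic.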
+ + c.bench_function("authorize_withdrawer_checked", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_staker_checked_with_seed(c: &mut Criterion) { + let seed = "test test"; + let authorize_address = Pubkey::new_unique(); + + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account_with_seed(seed, &authorize_address); + // add authorized address as signer + test_setup.add_account_signer(authorize_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + // add new authorize account as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::AuthorizeCheckedWithSeed( + AuthorizeCheckedWithSeedArgs { + stake_authorize: StakeAuthorize::Staker, + authority_seed: seed.to_string(), + authority_owner: authorize_address, + }, + )) + .unwrap(); + + c.bench_function("authorize_staker_checked_with_seed", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_authorize_withdrawer_checked_with_seed(c: &mut Criterion) { + let seed = "test test"; + let authorize_address = Pubkey::new_unique(); + + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account_with_seed(seed, &authorize_address); + // add authorized address as signer + test_setup.add_account_signer(authorize_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + // add new authorize account as signer + test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::AuthorizeCheckedWithSeed( + AuthorizeCheckedWithSeedArgs { + stake_authorize: StakeAuthorize::Withdrawer, + authority_seed: seed.to_string(), + authority_owner: authorize_address, + }, + )) + .unwrap(); + + c.bench_function("authorize_withdrawer_checked_with_seed", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_set_lockup(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + + let instruction_data = serialize(&StakeInstruction::SetLockup(LockupArgs { + unix_timestamp: None, + epoch: Some(1), + custodian: None, + })) + .unwrap(); + + c.bench_function("set_lockup", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_set_lockup_checked(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + + let instruction_data = serialize(&StakeInstruction::SetLockupChecked(LockupCheckedArgs { + unix_timestamp: None, + epoch: Some(1), + })) + .unwrap(); + + c.bench_function("set_lockup_checked", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_withdraw(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + let withdraw_authority_address = test_setup.config_withdraw_authority(); + + // withdraw to pubkey + test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); + // clock + test_setup.add_account( + clock::id(), + 
create_account_shared_data_for_test(&Clock::default()), + ); + // stake history + test_setup.add_account( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + // withdrawer pubkey + test_setup.add_account_signer(withdraw_authority_address, AccountSharedData::default()); + + let instruction_data = serialize(&StakeInstruction::Withdraw(1)).unwrap(); + + c.bench_function("withdraw", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_delegate_stake(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + + let vote_address = Pubkey::new_unique(); + let vote_account = vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100); + test_setup.add_account(vote_address, vote_account); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + test_setup.add_account( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + // dummy config account to pass the check + test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); + let instruction_data = serialize(&StakeInstruction::DelegateStake).unwrap(); + + c.bench_function("delegate_stake", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_deactivate(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.delegate_stake(); + + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + + let instruction_data = serialize(&StakeInstruction::Deactivate).unwrap(); + + c.bench_function("deactivate", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_split(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + + let split_to_address = Pubkey::new_unique(); + let split_to_account = AccountSharedData::new_data_with_space( + 0, + &StakeStateV2::Uninitialized, + StakeStateV2::size_of(), + &solana_stake_program::id(), + ) + .unwrap(); + + test_setup.add_account(split_to_address, split_to_account); + test_setup.add_account( + rent::id(), + create_account_shared_data_for_test(&Rent { + lamports_per_byte_year: 0, + ..Rent::default() + }), + ); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + test_setup.add_account( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + + let instruction_data = serialize(&StakeInstruction::Split(1)).unwrap(); + + c.bench_function("split", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_merge(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + + let merge_from_address = Pubkey::new_unique(); + // merge from account has the same authority as the stake account for simplicity; + // it also holds just 1 lamport so merging it into the current stake account cannot hit `ArithmeticOverflow` + let merge_from_account = AccountSharedData::new_data_with_space( + 1, + &StakeStateV2::Initialized(Meta::auto(&test_setup.stake_address)), + StakeStateV2::size_of(), + &solana_stake_program::id(), + ) + .unwrap(); + + test_setup.add_account(merge_from_address, merge_from_account); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock::default()), + ); + test_setup.add_account(
+ stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + + let instruction_data = serialize(&StakeInstruction::Merge).unwrap(); + + c.bench_function("merge", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_get_minimum_delegation(c: &mut Criterion) { + let test_setup = TestSetup::new(); + let instruction_data = serialize(&StakeInstruction::GetMinimumDelegation).unwrap(); + + c.bench_function("get_minimum_delegation", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_deactivate_delinquent(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + + // reference vote account has been consistently voting + let mut vote_state = VoteState::default(); + for epoch in 0..=solana_sdk::stake::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION { + vote_state.increment_credits(epoch as Epoch, 1); + } + let reference_vote_address = Pubkey::new_unique(); + let reference_vote_account = AccountSharedData::new_data_with_space( + 1, + &VoteStateVersions::new_current(vote_state), + VoteState::size_of(), + &solana_vote_program::id(), + ) + .unwrap(); + + let vote_address = Pubkey::new_unique(); + let vote_account = vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100); + test_setup.stake_account = AccountSharedData::new_data_with_space( + 1, + &StakeStateV2::Stake( + Meta::default(), + Stake { + delegation: Delegation::new(&vote_address, 1, 1), + credits_observed: VoteState::default().credits(), + }, + StakeFlags::empty(), + ), + StakeStateV2::size_of(), + &solana_stake_program::id(), + ) + .unwrap(); + test_setup.transaction_accounts[0] = + (test_setup.stake_address, test_setup.stake_account.clone()); + + test_setup.add_account(vote_address, vote_account); + test_setup.add_account(reference_vote_address, reference_vote_account); + test_setup.add_account( + clock::id(), + create_account_shared_data_for_test(&Clock { + epoch: solana_sdk::stake::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION as u64, + ..Clock::default() + }), + ); + + let instruction_data = serialize(&StakeInstruction::DeactivateDelinquent).unwrap(); + + c.bench_function("deactivate_delinquent", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_move_stake(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.delegate_stake(); + + let destination_stake_address = Pubkey::new_unique(); + let destination_stake_account = test_setup.transaction_accounts[0].1.clone(); + test_setup.add_account(destination_stake_address, destination_stake_account); + test_setup.add_account_signer(test_setup.stake_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + // advance epoch to fully activate source account + create_account_shared_data_for_test(&Clock { + epoch: 1_u64, + ..Clock::default() + }), + ); + test_setup.add_account( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + + let instruction_data = serialize(&StakeInstruction::MoveStake(1)).unwrap(); + + c.bench_function("move_stake", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +fn bench_move_lamports(c: &mut Criterion) { + let mut test_setup = TestSetup::new(); + test_setup.initialize_stake_account(); + test_setup.delegate_stake(); + + let destination_stake_address = Pubkey::new_unique(); + let destination_stake_account = 
test_setup.transaction_accounts[0].1.clone(); + test_setup.add_account(destination_stake_address, destination_stake_account); + test_setup.add_account_signer(test_setup.stake_address, AccountSharedData::default()); + test_setup.add_account( + clock::id(), + // advance epoch to fully activate source account + create_account_shared_data_for_test(&Clock { + epoch: 1_u64, + ..Clock::default() + }), + ); + test_setup.add_account( + stake_history::id(), + create_account_shared_data_for_test(&StakeHistory::default()), + ); + + let instruction_data = serialize(&StakeInstruction::MoveLamports(1)).unwrap(); + + c.bench_function("move_lamports", |bencher| { + bencher.iter(|| test_setup.run(black_box(&instruction_data))) + }); +} + +criterion_group!( + benches, + bench_initialize, + bench_initialize_checked, + bench_authorize_staker, + bench_authorize_withdrawer, + bench_authorize_staker_with_seed, + bench_authorize_withdrawer_with_seed, + bench_authorize_staker_checked, + bench_authorize_withdrawer_checked, + bench_authorize_staker_checked_with_seed, + bench_authorize_withdrawer_checked_with_seed, + bench_set_lockup, + bench_set_lockup_checked, + bench_withdraw, + bench_delegate_stake, + bench_deactivate, + bench_split, + bench_merge, + bench_get_minimum_delegation, + bench_deactivate_delinquent, + bench_move_stake, + bench_move_lamports, +); +criterion_main!(benches); diff --git a/programs/zk-elgamal-proof/Cargo.toml b/programs/zk-elgamal-proof/Cargo.toml index e3dbcde0d5b18a..78f8acf2da6ec8 100644 --- a/programs/zk-elgamal-proof/Cargo.toml +++ b/programs/zk-elgamal-proof/Cargo.toml @@ -12,7 +12,8 @@ edition = { workspace = true } bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } +solana-instruction = { workspace = true, features = ["std"] } solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } -solana-sdk = { workspace = true } +solana-sdk-ids = { workspace = true } solana-zk-sdk = { workspace = true } diff --git a/programs/zk-elgamal-proof/src/lib.rs b/programs/zk-elgamal-proof/src/lib.rs index 2516cbbadf0d08..519d3d54ffd0ce 100644 --- a/programs/zk-elgamal-proof/src/lib.rs +++ b/programs/zk-elgamal-proof/src/lib.rs @@ -2,9 +2,10 @@ use { bytemuck::Pod, + solana_instruction::error::InstructionError, solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, - solana_sdk::{instruction::InstructionError, system_program}, + solana_sdk_ids::system_program, solana_zk_sdk::zk_elgamal_proof_program::{ id, instruction::ProofInstruction, diff --git a/programs/zk-token-proof-tests/Cargo.toml b/programs/zk-token-proof-tests/Cargo.toml index a00c98b20e4d2b..27a8192caa845c 100644 --- a/programs/zk-token-proof-tests/Cargo.toml +++ b/programs/zk-token-proof-tests/Cargo.toml @@ -10,7 +10,15 @@ edition = { workspace = true } [dev-dependencies] bytemuck = { workspace = true } curve25519-dalek = { workspace = true } +solana-account = { workspace = true } solana-compute-budget = { workspace = true } +solana-compute-budget-interface = { workspace = true } +solana-instruction = { workspace = true } +solana-keypair = { workspace = true } solana-program-test = { workspace = true } -solana-sdk = { workspace = true } +solana-pubkey = { workspace = true } +solana-signer = { workspace = true } +solana-system-interface = { workspace = true } +solana-transaction = { workspace = true } +solana-transaction-error = { workspace = true } solana-zk-token-sdk = { workspace = true } diff --git 
a/programs/zk-token-proof-tests/tests/process_transaction.rs b/programs/zk-token-proof-tests/tests/process_transaction.rs index 71d1761d2222e0..8e22f9fa339e75 100644 --- a/programs/zk-token-proof-tests/tests/process_transaction.rs +++ b/programs/zk-token-proof-tests/tests/process_transaction.rs @@ -1,16 +1,15 @@ use { bytemuck::{bytes_of, Pod}, curve25519_dalek::scalar::Scalar, + solana_account::Account, + solana_instruction::error::InstructionError, + solana_keypair::Keypair, solana_program_test::*, - solana_sdk::{ - account::Account, - instruction::InstructionError, - pubkey::Pubkey, - signature::Signer, - signer::keypair::Keypair, - system_instruction, - transaction::{Transaction, TransactionError}, - }, + solana_pubkey::Pubkey, + solana_signer::Signer, + solana_system_interface::instruction as system_instruction, + solana_transaction::Transaction, + solana_transaction_error::TransactionError, solana_zk_token_sdk::{ encryption::{ elgamal::{ElGamalKeypair, ElGamalSecretKey}, @@ -1713,10 +1712,10 @@ trait WithMaxComputeUnitLimit { fn with_max_compute_unit_limit(self) -> Self; } -impl WithMaxComputeUnitLimit for Vec { +impl WithMaxComputeUnitLimit for Vec { fn with_max_compute_unit_limit(mut self) -> Self { self.push( - solana_sdk::compute_budget::ComputeBudgetInstruction::set_compute_unit_limit( + solana_compute_budget_interface::ComputeBudgetInstruction::set_compute_unit_limit( solana_compute_budget::compute_budget_limits::MAX_COMPUTE_UNIT_LIMIT, ), ); diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index 2b8ce98f2ac2f3..50a49896dc83fb 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -13,9 +13,10 @@ bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } solana-feature-set = { workspace = true } +solana-instruction = { workspace = true } solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } -solana-sdk = { workspace = true } +solana-sdk-ids = { workspace = true } solana-zk-token-sdk = { workspace = true } [dev-dependencies] diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 15d292b29a4ef4..70a7e8e593f722 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -3,12 +3,10 @@ use { bytemuck::Pod, solana_feature_set as feature_set, + solana_instruction::{error::InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, - solana_sdk::{ - instruction::{InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, - system_program, - }, + solana_sdk_ids::system_program, solana_zk_token_sdk::{ zk_token_proof_instruction::*, zk_token_proof_program::id, diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index 2c9020bf846835..d8775aae7b4d32 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -23,7 +23,9 @@ use { }, solana_rpc_client_api::client_error::ErrorKind as ClientErrorKind, solana_streamer::nonblocking::quic::ALPN_TPU_PROTOCOL_ID, - solana_tls_utils::{new_dummy_x509_certificate, QuicClientCertificate, SkipServerVerification}, + solana_tls_utils::{ + new_dummy_x509_certificate, tls_client_config_builder, QuicClientCertificate, + }, solana_transaction_error::TransportResult, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -85,9 +87,7 @@ impl 
QuicLazyInitializedEndpoint { QuicNewConnection::create_endpoint(EndpointConfig::default(), client_socket) }; - let mut crypto = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(SkipServerVerification::new()) + let mut crypto = tls_client_config_builder() .with_client_auth_cert( vec![self.client_certificate.certificate.clone()], self.client_certificate.key.clone_key(), diff --git a/rpc/src/cache_block_meta_service.rs b/rpc/src/cache_block_meta_service.rs index 5d0dbefa7802a2..192b766e9f4a83 100644 --- a/rpc/src/cache_block_meta_service.rs +++ b/rpc/src/cache_block_meta_service.rs @@ -1,8 +1,10 @@ +//! The `CacheBlockMetaService` is responsible for persisting block metadata +//! from banks into the `Blockstore` + pub use solana_ledger::blockstore_processor::CacheBlockMetaSender; use { crossbeam_channel::{Receiver, RecvTimeoutError}, - solana_ledger::blockstore::Blockstore, - solana_measure::measure::Measure, + solana_ledger::blockstore::{Blockstore, BlockstoreError}, solana_runtime::bank::Bank, std::{ sync::{ @@ -20,8 +22,6 @@ pub struct CacheBlockMetaService { thread_hdl: JoinHandle<()>, } -const CACHE_BLOCK_TIME_WARNING_MS: u64 = 150; - impl CacheBlockMetaService { pub fn new( cache_block_meta_receiver: CacheBlockMetaReceiver, @@ -30,40 +30,39 @@ impl CacheBlockMetaService { ) -> Self { let thread_hdl = Builder::new() .name("solCacheBlkTime".to_string()) - .spawn(move || loop { - if exit.load(Ordering::Relaxed) { - break; - } - let recv_result = cache_block_meta_receiver.recv_timeout(Duration::from_secs(1)); - match recv_result { - Err(RecvTimeoutError::Disconnected) => { + .spawn(move || { + info!("CacheBlockMetaService has started"); + loop { + if exit.load(Ordering::Relaxed) { break; } - Ok(bank) => { - let mut cache_block_meta_timer = Measure::start("cache_block_meta_timer"); - Self::cache_block_meta(&bank, &blockstore); - cache_block_meta_timer.stop(); - if cache_block_meta_timer.as_ms() > CACHE_BLOCK_TIME_WARNING_MS { - warn!( - "cache_block_meta operation took: {}ms", - cache_block_meta_timer.as_ms() - ); + + let bank = match cache_block_meta_receiver.recv_timeout(Duration::from_secs(1)) + { + Ok(bank) => bank, + Err(RecvTimeoutError::Timeout) => continue, + Err(err @ RecvTimeoutError::Disconnected) => { + info!("CacheBlockMetaService is stopping because: {err}"); + break; } + }; + + if let Err(err) = Self::cache_block_meta(&bank, &blockstore) { + error!("CacheBlockMetaService is stopping because: {err}"); + // Set the exit flag to allow other services to gracefully stop + exit.store(true, Ordering::Relaxed); + break; } - _ => {} } + info!("CacheBlockMetaService has stopped"); }) .unwrap(); Self { thread_hdl } } - fn cache_block_meta(bank: &Bank, blockstore: &Blockstore) { - if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) { - error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e); - } - if let Err(e) = blockstore.cache_block_height(bank.slot(), bank.block_height()) { - error!("cache_block_height failed: slot {:?} {:?}", bank.slot(), e); - } + fn cache_block_meta(bank: &Bank, blockstore: &Blockstore) -> Result<(), BlockstoreError> { + blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp)?; + blockstore.cache_block_height(bank.slot(), bank.block_height()) } pub fn join(self) -> thread::Result<()> { diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index dfe856ee0b6a1a..baffa72b8e0788 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -8,7 +8,11 @@ use { base64::{prelude::BASE64_STANDARD, 
Engine}, bincode::{config::Options, serialize}, crossbeam_channel::{unbounded, Receiver, Sender}, - jsonrpc_core::{futures::future, types::error, BoxFuture, Error, Metadata, Result}, + jsonrpc_core::{ + futures::future::{self, FutureExt, OptionFuture}, + types::error, + BoxFuture, Error, Metadata, Result, + }, jsonrpc_derive::rpc, solana_account_decoder::{ encode_ui_account, @@ -18,7 +22,7 @@ use { }, solana_accounts_db::{ accounts::AccountAddressFilter, - accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig}, + accounts_index::{AccountIndex, AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult}, }, solana_client::connection_cache::Protocol, solana_entry::entry::Entry, @@ -55,7 +59,7 @@ use { bank_forks::BankForks, commitment::{BlockCommitmentArray, BlockCommitmentCache}, installed_scheduler_pool::BankWithScheduler, - non_circulating_supply::calculate_non_circulating_supply, + non_circulating_supply::{calculate_non_circulating_supply, NonCirculatingSupply}, prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_utils, @@ -79,6 +83,7 @@ use { self, AddressLoader, MessageHash, SanitizedTransaction, TransactionError, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, + transaction_context::TransactionAccount, }, solana_send_transaction_service::send_transaction_service::TransactionInfo, solana_stake_program, @@ -112,6 +117,7 @@ use { }, time::Duration, }, + tokio::runtime::Runtime, }; #[cfg(test)] use { @@ -150,7 +156,7 @@ fn is_finalized( && (blockstore.is_root(slot) || bank.status_cache_ancestors().contains(&slot)) } -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] pub struct JsonRpcConfig { pub enable_rpc_transaction_history: bool, pub enable_extended_tx_metadata_storage: bool, @@ -161,6 +167,7 @@ pub struct JsonRpcConfig { pub max_multiple_accounts: Option, pub account_indexes: AccountSecondaryIndexes, pub rpc_threads: usize, + pub rpc_blocking_threads: usize, pub rpc_niceness_adj: i8, pub full_api: bool, pub rpc_scan_and_fix_roots: bool, @@ -169,6 +176,28 @@ pub struct JsonRpcConfig { pub disable_health_check: bool, } +impl Default for JsonRpcConfig { + fn default() -> Self { + Self { + enable_rpc_transaction_history: Default::default(), + enable_extended_tx_metadata_storage: Default::default(), + faucet_addr: Option::default(), + health_check_slot_distance: Default::default(), + skip_preflight_health_check: bool::default(), + rpc_bigtable_config: Option::default(), + max_multiple_accounts: Option::default(), + account_indexes: AccountSecondaryIndexes::default(), + rpc_threads: 1, + rpc_blocking_threads: 1, + rpc_niceness_adj: Default::default(), + full_api: Default::default(), + rpc_scan_and_fix_roots: Default::default(), + max_request_body_size: Option::default(), + disable_health_check: Default::default(), + } + } +} + impl JsonRpcConfig { pub fn default_for_test() -> Self { Self { @@ -223,6 +252,7 @@ pub struct JsonRpcRequestProcessor { max_complete_transaction_status_slot: Arc, max_complete_rewards_slot: Arc, prioritization_fee_cache: Arc, + runtime: Arc, } impl Metadata for JsonRpcRequestProcessor {} @@ -253,6 +283,51 @@ impl JsonRpcRequestProcessor { Ok(bank) } + async fn calculate_non_circulating_supply( + &self, + bank: &Arc, + ) -> ScanResult { + let bank = Arc::clone(bank); + self.runtime + .spawn_blocking(move || calculate_non_circulating_supply(&bank)) + .await + .expect("Failed to spawn blocking task") + } + + pub async fn get_filtered_indexed_accounts( + &self, + bank: &Arc, + index_key: &IndexKey, + 
program_id: &Pubkey, + filters: Vec, + sort_results: bool, + ) -> ScanResult> { + let bank = Arc::clone(bank); + let index_key = index_key.to_owned(); + let program_id = program_id.to_owned(); + self.runtime + .spawn_blocking(move || { + bank.get_filtered_indexed_accounts( + &index_key, + |account| { + // The program-id account index checks for Account owner on inclusion. + // However, due to the current AccountsDb implementation, an account may + // remain in storage as a zero-lamport AccountSharedData::Default() after + // being wiped and reinitialized in later updates. We include the redundant + // filters here to avoid returning these accounts. + account.owner().eq(&program_id) + && filters + .iter() + .all(|filter_type| filter_allows(filter_type, account)) + }, + &ScanConfig::new(!sort_results), + bank.byte_limit_for_scans(), + ) + }) + .await + .expect("Failed to spawn blocking task") + } + #[allow(deprecated)] fn bank(&self, commitment: Option) -> Arc { debug!("RPC commitment_config: {:?}", commitment); @@ -329,6 +404,7 @@ impl JsonRpcRequestProcessor { max_complete_transaction_status_slot: Arc, max_complete_rewards_slot: Arc, prioritization_fee_cache: Arc, + runtime: Arc, ) -> (Self, Receiver) { let (transaction_sender, transaction_receiver) = unbounded(); ( @@ -351,6 +427,7 @@ impl JsonRpcRequestProcessor { max_complete_transaction_status_slot, max_complete_rewards_slot, prioritization_fee_cache, + runtime, }, transaction_receiver, ) @@ -362,6 +439,8 @@ impl JsonRpcRequestProcessor { socket_addr_space: SocketAddrSpace, connection_cache: Arc, ) -> Self { + use crate::rpc_service::service_runtime; + let genesis_hash = bank.hash(); let bank_forks = BankForks::new_rw_arc(bank); let bank = bank_forks.read().unwrap().root_bank(); @@ -401,8 +480,15 @@ impl JsonRpcRequestProcessor { let slot = bank.slot(); let optimistically_confirmed_bank = Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank })); + let config = JsonRpcConfig::default(); + let JsonRpcConfig { + rpc_threads, + rpc_blocking_threads, + rpc_niceness_adj, + .. 
+ } = config; Self { - config: JsonRpcConfig::default(), + config, snapshot_config: None, bank_forks, block_commitment_cache: Arc::new(RwLock::new(BlockCommitmentCache::new( @@ -430,12 +516,13 @@ impl JsonRpcRequestProcessor { max_complete_transaction_status_slot: Arc::new(AtomicU64::default()), max_complete_rewards_slot: Arc::new(AtomicU64::default()), prioritization_fee_cache: Arc::new(PrioritizationFeeCache::default()), + runtime: service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj), } } - pub fn get_account_info( + pub async fn get_account_info( &self, - pubkey: &Pubkey, + pubkey: Pubkey, config: Option, ) -> Result>> { let RpcAccountInfoConfig { @@ -450,11 +537,18 @@ impl JsonRpcRequestProcessor { })?; let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); - let response = get_encoded_account(&bank, pubkey, encoding, data_slice, None)?; + let response = self + .runtime + .spawn_blocking({ + let bank = Arc::clone(&bank); + move || get_encoded_account(&bank, &pubkey, encoding, data_slice, None) + }) + .await + .expect("rpc: get_encoded_account panicked")?; Ok(new_response(&bank, response)) } - pub fn get_multiple_accounts( + pub async fn get_multiple_accounts( &self, pubkeys: Vec, config: Option, @@ -471,10 +565,18 @@ impl JsonRpcRequestProcessor { })?; let encoding = encoding.unwrap_or(UiAccountEncoding::Base64); - let accounts = pubkeys - .into_iter() - .map(|pubkey| get_encoded_account(&bank, &pubkey, encoding, data_slice, None)) - .collect::>>()?; + let mut accounts = Vec::with_capacity(pubkeys.len()); + for pubkey in pubkeys { + let bank = Arc::clone(&bank); + accounts.push( + self.runtime + .spawn_blocking(move || { + get_encoded_account(&bank, &pubkey, encoding, data_slice, None) + }) + .await + .expect("rpc: get_encoded_account panicked")?, + ); + } Ok(new_response(&bank, accounts)) } @@ -487,9 +589,9 @@ impl JsonRpcRequestProcessor { .get_minimum_balance_for_rent_exemption(data_len) } - pub fn get_program_accounts( + pub async fn get_program_accounts( &self, - program_id: &Pubkey, + program_id: Pubkey, config: Option, mut filters: Vec, with_context: bool, @@ -508,30 +610,38 @@ impl JsonRpcRequestProcessor { let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); optimize_filters(&mut filters); let keyed_accounts = { - if let Some(owner) = get_spl_token_owner_filter(program_id, &filters) { + if let Some(owner) = get_spl_token_owner_filter(&program_id, &filters) { self.get_filtered_spl_token_accounts_by_owner( - &bank, + Arc::clone(&bank), program_id, - &owner, + owner, filters, sort_results, - )? - } else if let Some(mint) = get_spl_token_mint_filter(program_id, &filters) { + ) + .await? + } else if let Some(mint) = get_spl_token_mint_filter(&program_id, &filters) { self.get_filtered_spl_token_accounts_by_mint( - &bank, + Arc::clone(&bank), program_id, - &mint, + mint, filters, sort_results, - )? + ) + .await? } else { - self.get_filtered_program_accounts(&bank, program_id, filters, sort_results)? + self.get_filtered_program_accounts( + Arc::clone(&bank), + program_id, + filters, + sort_results, + ) + .await? 
} }; - let accounts = if is_known_spl_token_id(program_id) + let accounts = if is_known_spl_token_id(&program_id) && encoding == UiAccountEncoding::JsonParsed { - get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() + get_parsed_token_accounts(Arc::clone(&bank), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() @@ -925,7 +1035,7 @@ impl JsonRpcRequestProcessor { largest_accounts_cache.set_largest_accounts(filter, slot, accounts) } - fn get_largest_accounts( + async fn get_largest_accounts( &self, config: Option, ) -> RpcCustomResult>> { @@ -940,11 +1050,11 @@ impl JsonRpcRequestProcessor { }) } else { let (addresses, address_filter) = if let Some(filter) = config.clone().filter { - let non_circulating_supply = - calculate_non_circulating_supply(&bank).map_err(|e| { - RpcCustomError::ScanError { - message: e.to_string(), - } + let non_circulating_supply = self + .calculate_non_circulating_supply(&bank) + .await + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), })?; let addresses = non_circulating_supply.accounts.into_iter().collect(); let address_filter = match filter { @@ -955,13 +1065,21 @@ impl JsonRpcRequestProcessor { } else { (HashSet::new(), AccountAddressFilter::Exclude) }; - let accounts = bank - .get_largest_accounts( - NUM_LARGEST_ACCOUNTS, - &addresses, - address_filter, - sort_results, - ) + let accounts = self + .runtime + .spawn_blocking({ + let bank = Arc::clone(&bank); + move || { + bank.get_largest_accounts( + NUM_LARGEST_ACCOUNTS, + &addresses, + address_filter, + sort_results, + ) + } + }) + .await + .expect("Failed to spawn blocking task") .map_err(|e| RpcCustomError::ScanError { message: e.to_string(), })? @@ -977,16 +1095,18 @@ impl JsonRpcRequestProcessor { } } - fn get_supply( + async fn get_supply( &self, config: Option, ) -> RpcCustomResult> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let non_circulating_supply = - calculate_non_circulating_supply(&bank).map_err(|e| RpcCustomError::ScanError { - message: e.to_string(), - })?; + self.calculate_non_circulating_supply(&bank) + .await + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + })?; let total_supply = bank.capitalization(); let non_circulating_accounts = if config.exclude_non_circulating_accounts_list { vec![] @@ -1191,42 +1311,65 @@ impl JsonRpcRequestProcessor { .highest_super_majority_root() { self.check_blockstore_writes_complete(slot)?; - let result = self.blockstore.get_rooted_block(slot, true); + let result = self + .runtime + .spawn_blocking({ + let blockstore = Arc::clone(&self.blockstore); + move || blockstore.get_rooted_block(slot, true) + }) + .await + .expect("Failed to spawn blocking task"); self.check_blockstore_root(&result, slot)?; - let encode_block = |confirmed_block: ConfirmedBlock| -> Result { - let mut encoded_block = confirmed_block - .encode_with_options(encoding, encoding_options) - .map_err(RpcCustomError::from)?; + let encode_block = |confirmed_block: ConfirmedBlock| async move { + let mut encoded_block = self + .runtime + .spawn_blocking(move || { + confirmed_block + .encode_with_options(encoding, encoding_options) + .map_err(RpcCustomError::from) + }) + .await + .expect("Failed to spawn blocking task")?; if slot == 0 { encoded_block.block_time = Some(self.genesis_creation_time()); encoded_block.block_height = Some(0); } - Ok(encoded_block) + Ok::(encoded_block) }; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { 
let bigtable_result = bigtable_ledger_storage.get_confirmed_block(slot).await; self.check_bigtable_result(&bigtable_result)?; - return bigtable_result.ok().map(encode_block).transpose(); + let encoded_block_future: OptionFuture<_> = + bigtable_result.ok().map(encode_block).into(); + return encoded_block_future.await.transpose(); } } self.check_slot_cleaned_up(&result, slot)?; - return result + let encoded_block_future: OptionFuture<_> = result .ok() .map(ConfirmedBlock::from) .map(encode_block) - .transpose(); + .into(); + return encoded_block_future.await.transpose(); } else if commitment.is_confirmed() { // Check if block is confirmed let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); if confirmed_bank.status_cache_ancestors().contains(&slot) { self.check_blockstore_writes_complete(slot)?; - let result = self.blockstore.get_complete_block(slot, true); - return result + let result = self + .runtime + .spawn_blocking({ + let blockstore = Arc::clone(&self.blockstore); + move || blockstore.get_complete_block(slot, true) + }) + .await + .expect("Failed to spawn blocking task"); + let encoded_block_future: OptionFuture<_> = result .ok() .map(ConfirmedBlock::from) - .map(|mut confirmed_block| -> Result { + .map(|mut confirmed_block| async move { if confirmed_block.block_time.is_none() || confirmed_block.block_height.is_none() { @@ -1241,12 +1384,20 @@ impl JsonRpcRequestProcessor { } } } - - Ok(confirmed_block - .encode_with_options(encoding, encoding_options) - .map_err(RpcCustomError::from)?) + let encoded_block = self + .runtime + .spawn_blocking(move || { + confirmed_block + .encode_with_options(encoding, encoding_options) + .map_err(RpcCustomError::from) + }) + .await + .expect("Failed to spawn blocking task")?; + + Ok(encoded_block) }) - .transpose(); + .into(); + return encoded_block_future.await.transpose(); } } } else { @@ -1604,13 +1755,22 @@ impl JsonRpcRequestProcessor { if self.config.enable_rpc_transaction_history { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); - let confirmed_transaction = if commitment.is_confirmed() { - let highest_confirmed_slot = confirmed_bank.slot(); - self.blockstore - .get_complete_transaction(signature, highest_confirmed_slot) - } else { - self.blockstore.get_rooted_transaction(signature) - }; + let confirmed_transaction = self + .runtime + .spawn_blocking({ + let blockstore = Arc::clone(&self.blockstore); + let confirmed_bank = Arc::clone(&confirmed_bank); + move || { + if commitment.is_confirmed() { + let highest_confirmed_slot = confirmed_bank.slot(); + blockstore.get_complete_transaction(signature, highest_confirmed_slot) + } else { + blockstore.get_rooted_transaction(signature) + } + } + }) + .await + .expect("Failed to spawn blocking task"); let encode_transaction = |confirmed_tx_with_meta: ConfirmedTransactionWithStatusMeta| -> Result { @@ -1886,13 +2046,13 @@ impl JsonRpcRequestProcessor { Ok(new_response(&bank, supply)) } - pub fn get_token_largest_accounts( + pub async fn get_token_largest_accounts( &self, - mint: &Pubkey, + mint: Pubkey, commitment: Option, ) -> Result>> { let bank = self.bank(commitment); - let (mint_owner, data) = get_mint_owner_and_additional_data(&bank, mint)?; + let (mint_owner, data) = get_mint_owner_and_additional_data(&bank, &mint)?; if !is_known_spl_token_id(&mint_owner) { return Err(Error::invalid_params( "Invalid param: not a Token mint".to_string(), @@ -1901,8 +2061,15 @@ impl JsonRpcRequestProcessor { let mut token_balances = 
BinaryHeap::>::with_capacity(NUM_LARGEST_ACCOUNTS); - for (address, account) in - self.get_filtered_spl_token_accounts_by_mint(&bank, &mint_owner, mint, vec![], true)? + for (address, account) in self + .get_filtered_spl_token_accounts_by_mint( + Arc::clone(&bank), + mint_owner, + mint, + vec![], + true, + ) + .await? { let amount = StateWithExtensions::::unpack(account.data()) .map(|account| account.base.amount) @@ -1935,9 +2102,9 @@ impl JsonRpcRequestProcessor { Ok(new_response(&bank, token_balances)) } - pub fn get_token_accounts_by_owner( + pub async fn get_token_accounts_by_owner( &self, - owner: &Pubkey, + owner: Pubkey, token_account_filter: TokenAccountsFilter, config: Option, sort_results: bool, @@ -1964,13 +2131,15 @@ impl JsonRpcRequestProcessor { ))); } - let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner( - &bank, - &token_program_id, - owner, - filters, - sort_results, - )?; + let keyed_accounts = self + .get_filtered_spl_token_accounts_by_owner( + Arc::clone(&bank), + token_program_id, + owner, + filters, + sort_results, + ) + .await?; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { @@ -1987,9 +2156,9 @@ impl JsonRpcRequestProcessor { Ok(new_response(&bank, accounts)) } - pub fn get_token_accounts_by_delegate( + pub async fn get_token_accounts_by_delegate( &self, - delegate: &Pubkey, + delegate: Pubkey, token_account_filter: TokenAccountsFilter, config: Option, sort_results: bool, @@ -2019,16 +2188,23 @@ impl JsonRpcRequestProcessor { // Optional filter on Mint address, uses mint account index for scan let keyed_accounts = if let Some(mint) = mint { self.get_filtered_spl_token_accounts_by_mint( - &bank, - &token_program_id, - &mint, + Arc::clone(&bank), + token_program_id, + mint, filters, sort_results, - )? + ) + .await? } else { // Filter on Token Account state filters.push(RpcFilterType::TokenAccountState); - self.get_filtered_program_accounts(&bank, &token_program_id, filters, sort_results)? + self.get_filtered_program_accounts( + Arc::clone(&bank), + token_program_id, + filters, + sort_results, + ) + .await? }; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() @@ -2047,66 +2223,63 @@ impl JsonRpcRequestProcessor { } /// Use a set of filters to get an iterator of keyed program accounts from a bank - fn get_filtered_program_accounts( + async fn get_filtered_program_accounts( &self, - bank: &Bank, - program_id: &Pubkey, + bank: Arc, + program_id: Pubkey, mut filters: Vec, sort_results: bool, ) -> RpcCustomResult> { optimize_filters(&mut filters); - let filter_closure = |account: &AccountSharedData| { - filters - .iter() - .all(|filter_type| filter_allows(filter_type, account)) - }; if self .config .account_indexes .contains(&AccountIndex::ProgramId) { - if !self.config.account_indexes.include_key(program_id) { + if !self.config.account_indexes.include_key(&program_id) { return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { index_key: program_id.to_string(), }); } - Ok(bank - .get_filtered_indexed_accounts( - &IndexKey::ProgramId(*program_id), - |account| { - // The program-id account index checks for Account owner on inclusion. However, due - // to the current AccountsDb implementation, an account may remain in storage as a - // zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later - // updates. 
We include the redundant filters here to avoid returning these - // accounts. - account.owner() == program_id && filter_closure(account) - }, - &ScanConfig::new(!sort_results), - bank.byte_limit_for_scans(), - ) - .map_err(|e| RpcCustomError::ScanError { - message: e.to_string(), - })?) + self.get_filtered_indexed_accounts( + &bank, + &IndexKey::ProgramId(program_id), + &program_id, + filters, + sort_results, + ) + .await + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + }) } else { // this path does not need to provide a mb limit because we only want to support secondary indexes - Ok(bank - .get_filtered_program_accounts( - program_id, - filter_closure, - &ScanConfig::new(!sort_results), - ) - .map_err(|e| RpcCustomError::ScanError { - message: e.to_string(), - })?) + self.runtime + .spawn_blocking(move || { + bank.get_filtered_program_accounts( + &program_id, + |account: &AccountSharedData| { + filters + .iter() + .all(|filter_type| filter_allows(filter_type, account)) + }, + &ScanConfig::new(!sort_results), + ) + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + }) + }) + .await + .expect("Failed to spawn blocking task") } } /// Get an iterator of spl-token accounts by owner address - fn get_filtered_spl_token_accounts_by_owner( + async fn get_filtered_spl_token_accounts_by_owner( &self, - bank: &Bank, - program_id: &Pubkey, - owner_key: &Pubkey, + bank: Arc, + program_id: Pubkey, + owner_key: Pubkey, mut filters: Vec, sort_results: bool, ) -> RpcCustomResult> { @@ -2128,37 +2301,34 @@ impl JsonRpcRequestProcessor { .account_indexes .contains(&AccountIndex::SplTokenOwner) { - if !self.config.account_indexes.include_key(owner_key) { + if !self.config.account_indexes.include_key(&owner_key) { return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { index_key: owner_key.to_string(), }); } - Ok(bank - .get_filtered_indexed_accounts( - &IndexKey::SplTokenOwner(*owner_key), - |account| { - account.owner() == program_id - && filters - .iter() - .all(|filter_type| filter_allows(filter_type, account)) - }, - &ScanConfig::new(!sort_results), - bank.byte_limit_for_scans(), - ) - .map_err(|e| RpcCustomError::ScanError { - message: e.to_string(), - })?) 
+ self.get_filtered_indexed_accounts( + &bank, + &IndexKey::SplTokenOwner(owner_key), + &program_id, + filters, + sort_results, + ) + .await + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + }) } else { self.get_filtered_program_accounts(bank, program_id, filters, sort_results) + .await } } /// Get an iterator of spl-token accounts by mint address - fn get_filtered_spl_token_accounts_by_mint( + async fn get_filtered_spl_token_accounts_by_mint( &self, - bank: &Bank, - program_id: &Pubkey, - mint_key: &Pubkey, + bank: Arc, + program_id: Pubkey, + mint_key: Pubkey, mut filters: Vec, sort_results: bool, ) -> RpcCustomResult> { @@ -2179,28 +2349,25 @@ impl JsonRpcRequestProcessor { .account_indexes .contains(&AccountIndex::SplTokenMint) { - if !self.config.account_indexes.include_key(mint_key) { + if !self.config.account_indexes.include_key(&mint_key) { return Err(RpcCustomError::KeyExcludedFromSecondaryIndex { index_key: mint_key.to_string(), }); } - Ok(bank - .get_filtered_indexed_accounts( - &IndexKey::SplTokenMint(*mint_key), - |account| { - account.owner() == program_id - && filters - .iter() - .all(|filter_type| filter_allows(filter_type, account)) - }, - &ScanConfig::new(!sort_results), - bank.byte_limit_for_scans(), - ) - .map_err(|e| RpcCustomError::ScanError { - message: e.to_string(), - })?) + self.get_filtered_indexed_accounts( + &bank, + &IndexKey::SplTokenMint(mint_key), + &program_id, + filters, + sort_results, + ) + .await + .map_err(|e| RpcCustomError::ScanError { + message: e.to_string(), + }) } else { self.get_filtered_program_accounts(bank, program_id, filters, sort_results) + .await } } @@ -3020,7 +3187,7 @@ pub mod rpc_accounts { meta: Self::Metadata, pubkey_str: String, config: Option, - ) -> Result>>; + ) -> BoxFuture>>>; #[rpc(meta, name = "getMultipleAccounts")] fn get_multiple_accounts( @@ -3028,7 +3195,7 @@ pub mod rpc_accounts { meta: Self::Metadata, pubkey_strs: Vec, config: Option, - ) -> Result>>>; + ) -> BoxFuture>>>>; #[rpc(meta, name = "getBlockCommitment")] fn get_block_commitment( @@ -3067,10 +3234,13 @@ pub mod rpc_accounts { meta: Self::Metadata, pubkey_str: String, config: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!("get_account_info rpc request received: {:?}", pubkey_str); - let pubkey = verify_pubkey(&pubkey_str)?; - meta.get_account_info(&pubkey, config) + async move { + let pubkey = verify_pubkey(&pubkey_str)?; + meta.get_account_info(pubkey, config).await + } + .boxed() } fn get_multiple_accounts( @@ -3078,26 +3248,28 @@ pub mod rpc_accounts { meta: Self::Metadata, pubkey_strs: Vec, config: Option, - ) -> Result>>> { + ) -> BoxFuture>>>> { debug!( "get_multiple_accounts rpc request received: {:?}", pubkey_strs.len() ); - - let max_multiple_accounts = meta - .config - .max_multiple_accounts - .unwrap_or(MAX_MULTIPLE_ACCOUNTS); - if pubkey_strs.len() > max_multiple_accounts { - return Err(Error::invalid_params(format!( - "Too many inputs provided; max {max_multiple_accounts}" - ))); + async move { + let max_multiple_accounts = meta + .config + .max_multiple_accounts + .unwrap_or(MAX_MULTIPLE_ACCOUNTS); + if pubkey_strs.len() > max_multiple_accounts { + return Err(Error::invalid_params(format!( + "Too many inputs provided; max {max_multiple_accounts}" + ))); + } + let pubkeys = pubkey_strs + .into_iter() + .map(|pubkey_str| verify_pubkey(&pubkey_str)) + .collect::>>()?; + meta.get_multiple_accounts(pubkeys, config).await } - let pubkeys = pubkey_strs - .into_iter() - .map(|pubkey_str| verify_pubkey(&pubkey_str)) - 
.collect::>>()?; - meta.get_multiple_accounts(pubkeys, config) + .boxed() } fn get_block_commitment( @@ -3151,21 +3323,21 @@ pub mod rpc_accounts_scan { meta: Self::Metadata, program_id_str: String, config: Option, - ) -> Result>>; + ) -> BoxFuture>>>; #[rpc(meta, name = "getLargestAccounts")] fn get_largest_accounts( &self, meta: Self::Metadata, config: Option, - ) -> Result>>; + ) -> BoxFuture>>>; #[rpc(meta, name = "getSupply")] fn get_supply( &self, meta: Self::Metadata, config: Option, - ) -> Result>; + ) -> BoxFuture>>; // SPL Token-specific RPC endpoints // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for @@ -3177,7 +3349,7 @@ pub mod rpc_accounts_scan { meta: Self::Metadata, mint_str: String, commitment: Option, - ) -> Result>>; + ) -> BoxFuture>>>; #[rpc(meta, name = "getTokenAccountsByOwner")] fn get_token_accounts_by_owner( @@ -3186,7 +3358,7 @@ pub mod rpc_accounts_scan { owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option, - ) -> Result>>; + ) -> BoxFuture>>>; #[rpc(meta, name = "getTokenAccountsByDelegate")] fn get_token_accounts_by_delegate( @@ -3195,7 +3367,7 @@ pub mod rpc_accounts_scan { delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option, - ) -> Result>>; + ) -> BoxFuture>>>; } pub struct AccountsScanImpl; @@ -3207,49 +3379,53 @@ pub mod rpc_accounts_scan { meta: Self::Metadata, program_id_str: String, config: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!( "get_program_accounts rpc request received: {:?}", program_id_str ); - let program_id = verify_pubkey(&program_id_str)?; - let (config, filters, with_context, sort_results) = if let Some(config) = config { - ( - Some(config.account_config), - config.filters.unwrap_or_default(), - config.with_context.unwrap_or_default(), - config.sort_results.unwrap_or(true), - ) - } else { - (None, vec![], false, true) - }; - if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { - return Err(Error::invalid_params(format!( - "Too many filters provided; max {MAX_GET_PROGRAM_ACCOUNT_FILTERS}" - ))); - } - for filter in &filters { - verify_filter(filter)?; + async move { + let program_id = verify_pubkey(&program_id_str)?; + let (config, filters, with_context, sort_results) = if let Some(config) = config { + ( + Some(config.account_config), + config.filters.unwrap_or_default(), + config.with_context.unwrap_or_default(), + config.sort_results.unwrap_or(true), + ) + } else { + (None, vec![], false, true) + }; + if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { + return Err(Error::invalid_params(format!( + "Too many filters provided; max {MAX_GET_PROGRAM_ACCOUNT_FILTERS}" + ))); + } + for filter in &filters { + verify_filter(filter)?; + } + meta.get_program_accounts(program_id, config, filters, with_context, sort_results) + .await } - meta.get_program_accounts(&program_id, config, filters, with_context, sort_results) + .boxed() } fn get_largest_accounts( &self, meta: Self::Metadata, config: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!("get_largest_accounts rpc request received"); - Ok(meta.get_largest_accounts(config)?) + async move { Ok(meta.get_largest_accounts(config).await?) }.boxed() } fn get_supply( &self, meta: Self::Metadata, config: Option, - ) -> Result> { + ) -> BoxFuture>> { debug!("get_supply rpc request received"); - Ok(meta.get_supply(config)?) + async move { Ok(meta.get_supply(config).await?) 
}.boxed() } fn get_token_largest_accounts( @@ -3257,13 +3433,16 @@ pub mod rpc_accounts_scan { meta: Self::Metadata, mint_str: String, commitment: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!( "get_token_largest_accounts rpc request received: {:?}", mint_str ); - let mint = verify_pubkey(&mint_str)?; - meta.get_token_largest_accounts(&mint, commitment) + async move { + let mint = verify_pubkey(&mint_str)?; + meta.get_token_largest_accounts(mint, commitment).await + } + .boxed() } fn get_token_accounts_by_owner( @@ -3272,14 +3451,18 @@ pub mod rpc_accounts_scan { owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!( "get_token_accounts_by_owner rpc request received: {:?}", owner_str ); - let owner = verify_pubkey(&owner_str)?; - let token_account_filter = verify_token_account_filter(token_account_filter)?; - meta.get_token_accounts_by_owner(&owner, token_account_filter, config, true) + async move { + let owner = verify_pubkey(&owner_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_owner(owner, token_account_filter, config, true) + .await + } + .boxed() } fn get_token_accounts_by_delegate( @@ -3288,14 +3471,18 @@ pub mod rpc_accounts_scan { delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option, - ) -> Result>> { + ) -> BoxFuture>>> { debug!( "get_token_accounts_by_delegate rpc request received: {:?}", delegate_str ); - let delegate = verify_pubkey(&delegate_str)?; - let token_account_filter = verify_token_account_filter(token_account_filter)?; - meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config, true) + async move { + let delegate = verify_pubkey(&delegate_str)?; + let token_account_filter = verify_token_account_filter(token_account_filter)?; + meta.get_token_accounts_by_delegate(delegate, token_account_filter, config, true) + .await + } + .boxed() } } } @@ -4335,6 +4522,7 @@ pub mod tests { optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBankTracker, }, + rpc_service::service_runtime, rpc_subscriptions::RpcSubscriptions, }, bincode::deserialize, @@ -4513,6 +4701,12 @@ pub mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let JsonRpcConfig { + rpc_threads, + rpc_blocking_threads, + rpc_niceness_adj, + .. + } = config; let meta = JsonRpcRequestProcessor::new( config, None, @@ -4531,6 +4725,7 @@ pub mod tests { max_complete_transaction_status_slot.clone(), max_complete_rewards_slot, Arc::new(PrioritizationFeeCache::default()), + service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj), ) .0; @@ -6479,8 +6674,15 @@ pub mod tests { .my_contact_info() .tpu(connection_cache.protocol()) .unwrap(); + let config = JsonRpcConfig::default(); + let JsonRpcConfig { + rpc_threads, + rpc_blocking_threads, + rpc_niceness_adj, + .. 
+ } = config; let (meta, receiver) = JsonRpcRequestProcessor::new( - JsonRpcConfig::default(), + config, None, bank_forks.clone(), block_commitment_cache, @@ -6497,6 +6699,7 @@ pub mod tests { Arc::new(AtomicU64::default()), Arc::new(AtomicU64::default()), Arc::new(PrioritizationFeeCache::default()), + service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj), ); let client = ConnectionCacheClient::::new( connection_cache.clone(), @@ -6751,8 +6954,15 @@ pub mod tests { .unwrap(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let config = JsonRpcConfig::default(); + let JsonRpcConfig { + rpc_threads, + rpc_blocking_threads, + rpc_niceness_adj, + .. + } = config; let (request_processor, receiver) = JsonRpcRequestProcessor::new( - JsonRpcConfig::default(), + config, None, bank_forks.clone(), block_commitment_cache, @@ -6769,6 +6979,7 @@ pub mod tests { Arc::new(AtomicU64::default()), Arc::new(AtomicU64::default()), Arc::new(PrioritizationFeeCache::default()), + service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj), ); let client = ConnectionCacheClient::::new( connection_cache.clone(), @@ -8400,8 +8611,15 @@ pub mod tests { optimistically_confirmed_bank.clone(), )); + let config = JsonRpcConfig::default(); + let JsonRpcConfig { + rpc_threads, + rpc_blocking_threads, + rpc_niceness_adj, + .. + } = config; let (meta, _receiver) = JsonRpcRequestProcessor::new( - JsonRpcConfig::default(), + config, None, bank_forks.clone(), block_commitment_cache, @@ -8418,6 +8636,7 @@ pub mod tests { max_complete_transaction_status_slot, max_complete_rewards_slot, Arc::new(PrioritizationFeeCache::default()), + service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj), ); let mut io = MetaIoHandler::default(); diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 2c582be72ea5a6..78dab869ecaee6 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -362,6 +362,7 @@ impl JsonRpcService { info!("rpc bound to {:?}", rpc_addr); info!("rpc configuration: {:?}", config); let rpc_threads = 1.max(config.rpc_threads); + let rpc_blocking_threads = 1.max(config.rpc_blocking_threads); let rpc_niceness_adj = config.rpc_niceness_adj; let health = Arc::new(RpcHealth::new( @@ -381,21 +382,7 @@ impl JsonRpcService { .tpu(connection_cache.protocol()) .map_err(|err| format!("{err}"))?; - // sadly, some parts of our current rpc implemention block the jsonrpc's - // _socket-listening_ event loop for too long, due to (blocking) long IO or intesive CPU, - // causing no further processing of incoming requests and ultimatily innocent clients timing-out. - // So create a (shared) multi-threaded event_loop for jsonrpc and set its .threads() to 1, - // so that we avoid the single-threaded event loops from being created automatically by - // jsonrpc for threads when .threads(N > 1) is given. 
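The removed comment above describes the problem; the fix lands just below as `service_runtime`: one shared multi-threaded tokio runtime with a capped blocking pool, so CPU-bound RPC work can be pushed onto `spawn_blocking` threads while the async workers keep servicing cheap calls. A minimal, self-contained sketch of that pattern follows; the thread counts, thread name, and `heavy_scan` workload are illustrative stand-ins, not the validator's real values.

```rust
use std::time::Instant;

// Stand-in for a CPU-bound call like getMultipleAccounts scanning accounts.
fn heavy_scan() -> u64 {
    (0..50_000_000u64).fold(0, |acc, x| acc ^ x)
}

fn main() {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(2) // async workers servicing cheap RPC calls
        .max_blocking_threads(2) // hard cap on concurrent spawn_blocking tasks
        .thread_name("rpcDemo")
        .enable_all()
        .build()
        .expect("build runtime");

    let started = Instant::now();
    let result = runtime.block_on(async {
        // The expensive work runs on the blocking pool; the async worker
        // threads stay free to keep servicing fast requests meanwhile.
        tokio::task::spawn_blocking(heavy_scan)
            .await
            .expect("blocking task panicked")
    });
    println!("scan result {result:#x} in {:?}", started.elapsed());
}
```

Capping `max_blocking_threads` is the important part: tokio's blocking pool otherwise grows up to 512 threads, exactly the kind of oversubscription the `NB` note in the new comment warns against.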
- let runtime = Arc::new( - tokio::runtime::Builder::new_multi_thread() - .worker_threads(rpc_threads) - .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap()) - .thread_name("solRpcEl") - .enable_all() - .build() - .expect("Runtime"), - ); + let runtime = service_runtime(rpc_threads, rpc_blocking_threads, rpc_niceness_adj); let exit_bigtable_ledger_upload_service = Arc::new(AtomicBool::new(false)); @@ -473,6 +460,7 @@ impl JsonRpcService { max_complete_transaction_status_slot, max_complete_rewards_slot, prioritization_fee_cache, + Arc::clone(&runtime), ); let leader_info = @@ -586,6 +574,40 @@ impl JsonRpcService { } } +pub fn service_runtime( + rpc_threads: usize, + rpc_blocking_threads: usize, + rpc_niceness_adj: i8, +) -> Arc<Runtime> { + // The jsonrpc_http_server crate supports two execution models: + // + // - By default, it spawns a number of threads - configured with .threads(N) - and runs a + // single-threaded futures executor in each thread. + // - Alternatively, when configured with .event_loop_executor(executor) and .threads(1), + // it executes all the tasks on the given executor, not spawning any extra internal threads. + // + // We use the latter configuration, with a multi-threaded tokio runtime as the executor. We + // do this so we can configure the number of worker threads and the number of blocking threads, + // and then use tokio::task::spawn_blocking() to avoid blocking the worker threads on CPU-bound + // operations like getMultipleAccounts. This results in reduced latency, since fast + // rpc calls (the majority) are not blocked by slow CPU-bound ones. + // + // NB: `rpc_blocking_threads` shouldn't be set too high (it defaults to num_cpus / 2). Too many + // (busy) blocking threads could compete for CPU time with other validator threads and + // negatively impact performance. + let runtime = Arc::new( + tokio::runtime::Builder::new_multi_thread() + .worker_threads(rpc_threads) + .max_blocking_threads(rpc_blocking_threads) + .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap()) + .thread_name("solRpcEl") + .enable_all() + .build() + .expect("Runtime"), + ); + runtime +} + #[cfg(test)] mod tests { use { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 10db11e0b09937..0b19397559f794 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -59,7 +59,7 @@ use { verify_precompiles::verify_precompiles, }, accounts_lt_hash::{CacheValue as AccountsLtHashCacheValue, Stats as AccountsLtHashStats}, - ahash::{AHashMap, AHashSet}, + ahash::AHashSet, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, log::*, @@ -97,7 +97,7 @@ use { solana_builtins::{prototype::BuiltinPrototype, BUILTINS, STATELESS_BUILTINS}, solana_compute_budget::compute_budget::ComputeBudget, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, - solana_cost_model::cost_tracker::CostTracker, + solana_cost_model::{block_cost_limits::simd_0207_block_limits, cost_tracker::CostTracker}, solana_feature_set::{ self as feature_set, remove_rounding_in_fee_calculation, reward_full_priority_fee, FeatureSet, @@ -940,7 +940,7 @@ pub struct Bank { /// /// Note: The initial state must be strictly from an ancestor, /// and not an intermediate state within this slot.
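The bank.rs hunks beginning here swap the lt-hash cache from a `RwLock`-wrapped map to a `DashMap` (see the field change just below), replacing one coarse map-wide lock with DashMap's internal per-shard locking. A toy sketch of the access pattern the diff relies on, with hypothetical key/value types: `entry().or_insert` keeps only the first value written, and reads clone the value out rather than holding the shard guard.

```rust
use dashmap::DashMap;
use std::{sync::Arc, thread};

fn main() {
    // No outer RwLock: DashMap shards the map and locks per shard internally.
    let cache: Arc<DashMap<u64, String>> = Arc::new(DashMap::new());

    let writers: Vec<_> = (0..4u64)
        .map(|t| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                for key in 0..100u64 {
                    // First write wins: or_insert_with leaves an existing
                    // value alone, the same way the lt-hash cache keeps only
                    // the account state seen *first* in the slot.
                    cache.entry(key).or_insert_with(|| format!("thread-{t}"));
                }
            })
        })
        .collect();
    writers.into_iter().for_each(|h| h.join().unwrap());

    assert_eq!(cache.len(), 100);
    // Reads hand out a guard; clone the value out instead of holding the
    // shard lock, as the diff does with `.map(|entry| entry.value().clone())`.
    let first = cache.get(&0).map(|entry| entry.value().clone());
    println!("first writer of key 0: {first:?}");
}
```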
- cache_for_accounts_lt_hash: RwLock<AHashMap<Pubkey, AccountsLtHashCacheValue>>, + cache_for_accounts_lt_hash: DashMap<Pubkey, AccountsLtHashCacheValue>, /// Stats related to the accounts lt hash stats_for_accounts_lt_hash: AccountsLtHashStats, @@ -1164,7 +1164,7 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash::identity())), - cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), + cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::default(), @@ -1420,7 +1420,7 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] hash_overrides: parent.hash_overrides.clone(), accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()), - cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), + cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::default(), @@ -1493,11 +1493,8 @@ impl Bank { let accounts_modified_this_slot = new.rc.accounts.accounts_db.get_pubkeys_for_slot(slot); let num_accounts_modified_this_slot = accounts_modified_this_slot.len(); - let cache_for_accounts_lt_hash = - new.cache_for_accounts_lt_hash.get_mut().unwrap(); - cache_for_accounts_lt_hash.reserve(num_accounts_modified_this_slot); for pubkey in accounts_modified_this_slot { - cache_for_accounts_lt_hash + new.cache_for_accounts_lt_hash .entry(pubkey) .or_insert(AccountsLtHashCacheValue::BankNew); } @@ -1833,7 +1830,7 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] hash_overrides: Arc::new(Mutex::new(HashOverrides::default())), accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash([0xBAD1; LtHash::NUM_ELEMENTS]))), - cache_for_accounts_lt_hash: RwLock::new(AHashMap::new()), + cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::new(&fields.bank_hash_stats), @@ -3954,7 +3951,7 @@ impl Bank { processed_counts.processed_with_successful_result_count += 1; } Err(err) => { - if *err_count == 0 { + if err_count.0 == 0 { debug!("tx error: {:?} {:?}", err, tx); } *err_count += 1; @@ -5301,6 +5298,22 @@ impl Bank { debug_do_not_add_builtins, ); + // The cost tracker is not serialized in snapshots nor in any config. + // We must therefore re-apply previously activated features that affect its limits here, + // so that the initial bank state is consistent with its feature set. + // Cost-tracker limits are propagated to child banks.
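The comment just above makes a subtle point: the cost tracker's limits are derived state, so they must be recomputed from the already-active feature set whenever a bank is reconstructed (for example from a snapshot). The next hunk does exactly that; here is a compact sketch of the same re-apply-on-construction pattern with hypothetical types, and with made-up limit values standing in for whatever `simd_0207_block_limits()` actually returns.

```rust
use std::collections::HashSet;

// Hypothetical stand-ins for the validator's FeatureSet and CostTracker.
#[derive(Default)]
struct FeatureSet(HashSet<&'static str>);
impl FeatureSet {
    fn is_active(&self, id: &str) -> bool {
        self.0.contains(id)
    }
}

#[derive(Debug, Default)]
struct CostTracker {
    account_limit: u64,
    block_limit: u64,
    vote_limit: u64,
}
impl CostTracker {
    fn set_limits(&mut self, account: u64, block: u64, vote: u64) {
        self.account_limit = account;
        self.block_limit = block;
        self.vote_limit = vote;
    }
}

// Made-up numbers; the real values come from simd_0207_block_limits().
fn raised_limits() -> (u64, u64, u64) {
    (12_000_000, 50_000_000, 36_000_000)
}

// Derived state is recomputed on every (re)construction, because it is
// never serialized alongside the rest of the bank's state.
fn finish_init(features: &FeatureSet, cost_tracker: &mut CostTracker) {
    if features.is_active("raise_block_limits_to_50m") {
        let (account, block, vote) = raised_limits();
        cost_tracker.set_limits(account, block, vote);
    }
}

fn main() {
    let mut features = FeatureSet::default();
    features.0.insert("raise_block_limits_to_50m");
    let mut tracker = CostTracker::default();
    finish_init(&features, &mut tracker);
    println!("{tracker:?}");
}
```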
+ if self + .feature_set + .is_active(&feature_set::raise_block_limits_to_50m::id()) + { + let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits(); + self.write_cost_tracker().unwrap().set_limits( + account_cost_limit, + block_cost_limit, + vote_cost_limit, + ); + } + if !debug_do_not_add_builtins { for builtin in BUILTINS .iter() @@ -6917,6 +6930,15 @@ impl Bank { ); } } + + if new_feature_activations.contains(&feature_set::raise_block_limits_to_50m::id()) { + let (account_cost_limit, block_cost_limit, vote_cost_limit) = simd_0207_block_limits(); + self.write_cost_tracker().unwrap().set_limits( + account_cost_limit, + block_cost_limit, + vote_cost_limit, + ); + } } fn apply_updated_hashes_per_tick(&mut self, hashes_per_tick: u64) { diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs index e68264e6327e7f..4e1f3741458b0e 100644 --- a/runtime/src/bank/accounts_lt_hash.rs +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -93,7 +93,7 @@ impl Bank { // And since `strictly_ancestors` is empty, loading the previous version of the account // from accounts db will return `None` (aka Dead), which is the correct behavior. assert!(strictly_ancestors.is_empty()); - self.cache_for_accounts_lt_hash.write().unwrap().clear(); + self.cache_for_accounts_lt_hash.clear(); } // Get all the accounts stored in this slot. @@ -133,7 +133,6 @@ impl Bank { // And a single page is likely the smallest size a disk read will actually read. // This can be tuned larger, but likely not smaller. const CHUNK_SIZE: usize = 128; - let cache_for_accounts_lt_hash = self.cache_for_accounts_lt_hash.read().unwrap(); accounts_curr .par_iter() .fold_chunks( @@ -142,9 +141,13 @@ impl Bank { |mut accum, (pubkey, curr_account)| { // load the initial state of the account let (initial_state_of_account, measure_load) = meas_dur!({ - match cache_for_accounts_lt_hash.get(pubkey) { + let cache_value = self + .cache_for_accounts_lt_hash + .get(pubkey) + .map(|entry| entry.value().clone()); + match cache_value { Some(CacheValue::InspectAccount(initial_state_of_account)) => { - initial_state_of_account.clone() + initial_state_of_account } Some(CacheValue::BankNew) | None => { accum.1.num_cache_misses += 1; @@ -313,17 +316,11 @@ impl Bank { // Only insert the account the *first* time we see it. // We want to capture the value of the account *before* any modifications during this slot. 
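Stepping back to the recalculation path earlier in this hunk: `accounts_curr.par_iter().fold_chunks(CHUNK_SIZE, ...)` batches per-account work so each chunk amortizes one page-sized disk read, as the `CHUNK_SIZE` comment explains. A minimal sketch of rayon's `fold_chunks`, which folds fixed-size chunks in parallel and then lets you reduce the per-chunk accumulators (the lamport-summing workload is invented for illustration):

```rust
use rayon::prelude::*;

fn main() {
    let accounts: Vec<u64> = (1..=1_000).collect();
    // Same shape as the diff: fold fixed-size chunks so per-chunk setup cost
    // (there, a disk read) is amortized across CHUNK_SIZE items.
    const CHUNK_SIZE: usize = 128;
    let total: u64 = accounts
        .par_iter()
        .fold_chunks(CHUNK_SIZE, || 0u64, |acc, &lamports| acc + lamports)
        .sum(); // reduce the per-chunk accumulators
    assert_eq!(total, 500_500);
    println!("sum over {} accounts: {total}", accounts.len());
}
```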
- let (is_in_cache, lookup_time) = meas_dur!({ - self.cache_for_accounts_lt_hash - .read() - .unwrap() - .contains_key(address) - }); + let (is_in_cache, lookup_time) = + meas_dur!(self.cache_for_accounts_lt_hash.contains_key(address)); if !is_in_cache { let (_, insert_time) = meas_dur!({ self.cache_for_accounts_lt_hash - .write() - .unwrap() .entry(*address) .or_insert_with(|| { let initial_state_of_account = match account_state { @@ -680,7 +677,7 @@ mod tests { assert!(bank.is_accounts_lt_hash_enabled()); // the cache should start off empty - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 0); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 0); // ensure non-writable accounts are *not* added to the cache bank.inspect_account_for_accounts_lt_hash( @@ -693,30 +690,25 @@ mod tests { &AccountState::Alive(&AccountSharedData::default()), false, ); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 0); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 0); // ensure *new* accounts are added to the cache let address = Pubkey::new_unique(); bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Dead, true); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 1); - assert!(bank - .cache_for_accounts_lt_hash - .read() - .unwrap() - .contains_key(&address)); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 1); + assert!(bank.cache_for_accounts_lt_hash.contains_key(&address)); // ensure *existing* accounts are added to the cache let address = Pubkey::new_unique(); let initial_lamports = 123; let mut account = AccountSharedData::new(initial_lamports, 0, &Pubkey::default()); bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Alive(&account), true); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 2); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 2); if let CacheValue::InspectAccount(InitialStateOfAccount::Alive(cached_account)) = bank .cache_for_accounts_lt_hash - .read() - .unwrap() .get(&address) .unwrap() + .value() { assert_eq!(*cached_account, account); } else { @@ -727,13 +719,12 @@ mod tests { let updated_lamports = account.lamports() + 1; account.set_lamports(updated_lamports); bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Alive(&account), true); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 2); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 2); if let CacheValue::InspectAccount(InitialStateOfAccount::Alive(cached_account)) = bank .cache_for_accounts_lt_hash - .read() - .unwrap() .get(&address) .unwrap() + .value() { assert_eq!(cached_account.lamports(), initial_lamports); } else { @@ -744,13 +735,12 @@ mod tests { { let address = Pubkey::new_unique(); bank.inspect_account_for_accounts_lt_hash(&address, &AccountState::Dead, true); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 3); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 3); match bank .cache_for_accounts_lt_hash - .read() - .unwrap() .get(&address) .unwrap() + .value() { CacheValue::InspectAccount(InitialStateOfAccount::Dead) => { // this is expected, nothing to do here @@ -763,13 +753,12 @@ mod tests { &AccountState::Alive(&AccountSharedData::default()), true, ); - assert_eq!(bank.cache_for_accounts_lt_hash.read().unwrap().len(), 3); + assert_eq!(bank.cache_for_accounts_lt_hash.len(), 3); match bank .cache_for_accounts_lt_hash - .read() - .unwrap() .get(&address) .unwrap() + .value() { CacheValue::InspectAccount(InitialStateOfAccount::Dead) => { // this 
is expected, nothing to do here @@ -1049,10 +1038,8 @@ mod tests { ]; let mut actual_cache: Vec<_> = bank .cache_for_accounts_lt_hash - .read() - .unwrap() .iter() - .map(|(k, v)| (*k, v.clone())) + .map(|entry| (*entry.key(), entry.value().clone())) .collect(); actual_cache.sort_unstable_by(|a, b| a.0.cmp(&b.0)); assert_eq!(expected_cache, actual_cache.as_slice()); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 5f1f751865ebb6..2005cd5e23a4f1 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -37,6 +37,7 @@ use { compute_budget::ComputeBudget, compute_budget_limits::{self, ComputeBudgetLimits, MAX_COMPUTE_UNIT_LIMIT}, }, + solana_cost_model::block_cost_limits::{MAX_BLOCK_UNITS, MAX_BLOCK_UNITS_SIMD_0207}, solana_feature_set::{self as feature_set, FeatureSet}, solana_inline_spl::token, solana_logger, @@ -7999,6 +8000,73 @@ fn test_reserved_account_keys() { ); } +#[test] +fn test_block_limits() { + let (bank0, _bank_forks) = create_simple_test_arc_bank(100_000); + let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1); + assert!(!bank + .feature_set + .is_active(&feature_set::raise_block_limits_to_50m::id())); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS, + "before activating the feature, bank should have old/default limit" + ); + + // Activate `raise_block_limits_to_50m` feature + bank.store_account( + &feature_set::raise_block_limits_to_50m::id(), + &feature::create_account(&Feature::default(), 42), + ); + // apply_feature_activations for `FinishInit` will not cause the block limit to be updated + bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS, + "before activating the feature, bank should have old/default limit" + ); + + // apply_feature_activations for `NewFromParent` will cause feature to be activated + bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS_SIMD_0207, + "after activating the feature, bank should have new limit" + ); + + // Make sure the limits propagate to the child-bank. + let bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), 2); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS_SIMD_0207, + "child bank should have new limit" + ); + + // Test starting from a genesis config with and without feature account + let (mut genesis_config, _keypair) = create_genesis_config(100_000); + // Without feature account in genesis, old limits are used. 
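For context on the limits asserted in this test, a hedged sketch of the quantities involved follows. Only the 50M block total is implied by the feature description in this change ("Raise block limit to 50M SIMD-0207"); the remaining values are assumptions for illustration:

```rust
// Assumed values for illustration; only the 50M block total is implied by
// the SIMD-0207 feature description elsewhere in this diff.
const MAX_BLOCK_UNITS: u64 = 48_000_000;
const MAX_BLOCK_UNITS_SIMD_0207: u64 = 50_000_000;
const MAX_WRITABLE_ACCOUNT_UNITS: u64 = 12_000_000;
const MAX_VOTE_UNITS: u64 = 36_000_000;

/// Mirrors the (account, block, vote) tuple order consumed by
/// `set_limits` in the hunks above.
fn simd_0207_block_limits() -> (u64, u64, u64) {
    (
        MAX_WRITABLE_ACCOUNT_UNITS,
        MAX_BLOCK_UNITS_SIMD_0207,
        MAX_VOTE_UNITS,
    )
}

fn main() {
    let (account, block, vote) = simd_0207_block_limits();
    assert!(account <= block && vote < block);
    assert!(block > MAX_BLOCK_UNITS); // the raise from the old default
}
```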
+ let bank = Bank::new_for_tests(&genesis_config); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS, + "before activating the feature, bank should have old/default limit" + ); + + activate_feature( + &mut genesis_config, + feature_set::raise_block_limits_to_50m::id(), + ); + let bank = Bank::new_for_tests(&genesis_config); + assert!(bank + .feature_set + .is_active(&feature_set::raise_block_limits_to_50m::id())); + assert_eq!( + bank.read_cost_tracker().unwrap().get_block_limit(), + MAX_BLOCK_UNITS_SIMD_0207, + "bank created from genesis config should have new limit" + ); +} + #[test] fn test_program_replacement() { let mut bank = create_simple_test_bank(0); @@ -12016,14 +12084,13 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase() { solana_logger::setup(); // Bank Setup - let (mut genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - genesis_config - .accounts - .remove(&feature_set::disable_sbpf_v1_execution::id()); - genesis_config - .accounts - .remove(&feature_set::reenable_sbpf_v1_execution::id()); - let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let mut bank = Bank::new_for_tests(&genesis_config); + let mut feature_set = FeatureSet::all_enabled(); + feature_set.deactivate(&feature_set::disable_sbpf_v0_execution::id()); + feature_set.deactivate(&feature_set::reenable_sbpf_v0_execution::id()); + bank.feature_set = Arc::new(feature_set); + let (root_bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); // Program Setup let program_keypair = Keypair::new(); @@ -12056,7 +12123,7 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase() { let feature_account_balance = std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); bank.store_account( - &feature_set::disable_sbpf_v1_execution::id(), + &feature_set::disable_sbpf_v0_execution::id(), &feature::create_account(&Feature { activated_at: None }, feature_account_balance), ); @@ -12115,7 +12182,7 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase() { result_with_feature_enabled, Err(TransactionError::InstructionError( 0, - InstructionError::InvalidAccountData + InstructionError::UnsupportedProgramId )) ); } @@ -12131,7 +12198,7 @@ fn test_feature_activation_loaded_programs_epoch_transition() { .remove(&feature_set::disable_fees_sysvar::id()); genesis_config .accounts - .remove(&feature_set::reenable_sbpf_v1_execution::id()); + .remove(&feature_set::reenable_sbpf_v0_execution::id()); let (root_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Program Setup diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 83c2e0ab3fd675..1627f8113021db 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -284,8 +284,8 @@ impl BankForks { self[self.highest_slot()].clone() } - pub fn working_bank_with_scheduler(&self) -> &BankWithScheduler { - &self.banks[&self.highest_slot()] + pub fn working_bank_with_scheduler(&self) -> BankWithScheduler { + self.banks[&self.highest_slot()].clone_with_scheduler() } /// Register to be notified when a bank has been dumped (due to duplicate block handling) diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index aa17bba0768db1..84d662e1422fde 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -49,6 +49,7 @@ full = [ "dep:solana-secp256k1-program", "dep:solana-seed-derivable", 
"dep:solana-seed-phrase", + "dep:solana-shred-version", "dep:solana-signer", "dep:solana-system-transaction", "dep:solana-transaction", @@ -180,6 +181,7 @@ solana-seed-phrase = { workspace = true, optional = true } solana-serde = { workspace = true } solana-serde-varint = { workspace = true } solana-short-vec = { workspace = true } +solana-shred-version = { workspace = true, optional = true } solana-signature = { workspace = true, features = [ "rand", "serde", @@ -199,6 +201,7 @@ solana-transaction-context = { workspace = true, features = ["bincode"] } solana-transaction-error = { workspace = true, features = [ "serde", ], optional = true } +solana-validator-exit = { workspace = true } thiserror = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 421e17f74cb2dc..e3f9009d4d1091 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -34,6 +34,7 @@ struct Config<'a> { features: Vec, force_tools_install: bool, skip_tools_install: bool, + no_rustup_override: bool, generate_child_script_on_failure: bool, no_default_features: bool, offline: bool, @@ -63,6 +64,7 @@ impl Default for Config<'_> { features: vec![], force_tools_install: false, skip_tools_install: false, + no_rustup_override: false, generate_child_script_on_failure: false, no_default_features: false, offline: false, @@ -645,9 +647,6 @@ fn build_solana_package( } }); - let platform_tools_version = - validate_platform_tools_version(platform_tools_version, DEFAULT_PLATFORM_TOOLS_VERSION); - info!("Solana SDK: {}", config.sbf_sdk.display()); if config.no_default_features { info!("No default features"); @@ -665,6 +664,9 @@ fn build_solana_package( }; if !config.skip_tools_install { + let platform_tools_version = + validate_platform_tools_version(platform_tools_version, DEFAULT_PLATFORM_TOOLS_VERSION); + let platform_tools_download_file_name = if cfg!(target_os = "windows") { format!("platform-tools-windows-{arch}.tar.bz2") } else if cfg!(target_os = "macos") { @@ -700,7 +702,23 @@ fn build_solana_package( exit(1); }); } - link_solana_toolchain(config); + let target = "sbf-solana-solana"; + + if config.no_rustup_override { + check_solana_target_installed(target); + } else { + link_solana_toolchain(config); + // RUSTC variable overrides cargo + mechanism of + // selecting the rust compiler and makes cargo run a rust compiler + // other than the one linked in Solana toolchain. We have to prevent + // this by removing RUSTC from the child process environment. + if env::var("RUSTC").is_ok() { + warn!( + "Removed RUSTC from cargo environment, because it overrides +solana cargo command line option." + ); + env::remove_var("RUSTC") + } + } let llvm_bin = config .sbf_sdk @@ -713,16 +731,6 @@ fn build_solana_package( env::set_var("OBJDUMP", llvm_bin.join("llvm-objdump")); env::set_var("OBJCOPY", llvm_bin.join("llvm-objcopy")); - // RUSTC variable overrides cargo + mechanism of - // selecting the rust compiler and makes cargo run a rust compiler - // other than the one linked in Solana toolchain. We have to prevent - // this by removing RUSTC from the child process environment. - if env::var("RUSTC").is_ok() { - warn!( - "Removed RUSTC from cargo environment, because it overrides +solana cargo command line option." 
- ); - env::remove_var("RUSTC") - } let cargo_target = "CARGO_TARGET_SBF_SOLANA_SOLANA_RUSTFLAGS"; let rustflags = env::var("RUSTFLAGS").ok().unwrap_or_default(); if env::var("RUSTFLAGS").is_ok() { @@ -757,13 +765,12 @@ fn build_solana_package( } let cargo_build = PathBuf::from("cargo"); - let mut cargo_build_args = vec![ - "+solana", - "build", - "--release", - "--target", - "sbf-solana-solana", - ]; + let mut cargo_build_args = vec![]; + if !config.no_rustup_override { + cargo_build_args.push("+solana"); + }; + + cargo_build_args.append(&mut vec!["build", "--release", "--target", target]); if config.arch == "sbfv2" { cargo_build_args.push("-Zbuild-std=std,panic_abort"); } @@ -912,6 +919,17 @@ fn build_solana_package( } } +// allow user to set proper `rustc` into RUSTC or into PATH +fn check_solana_target_installed(target: &str) { + let rustc = env::var("RUSTC").unwrap_or("rustc".to_owned()); + let rustc = PathBuf::from(rustc); + let output = spawn(&rustc, ["--print", "target-list"], false); + if !output.contains(target) { + error!("Provided {:?} does not have {} target. The Solana rustc must be available in $PATH or the $RUSTC environment variable for the build to succeed.", rustc, target); + exit(1); + } +} + fn build_solana(config: Config, manifest_path: Option) { let mut metadata_command = cargo_metadata::MetadataCommand::new(); if let Some(manifest_path) = manifest_path { @@ -1048,6 +1066,13 @@ fn main() { .takes_value(false) .conflicts_with("force_tools_install") .help("Skip downloading and installing platform-tools, assuming they are properly mounted"), + ) + .arg( + Arg::new("no_rustup_override") + .long("no-rustup-override") + .takes_value(false) + .conflicts_with("force_tools_install") + .help("Do not use rustup to manage the toolchain. By default, cargo-build-sbf invokes rustup to find the Solana rustc using a `+solana` toolchain override. 
This flag disables that behavior."), ) .arg( Arg::new("generate_child_script_on_failure") @@ -1173,6 +1198,7 @@ fn main() { features: matches.values_of_t("features").ok().unwrap_or_default(), force_tools_install: matches.is_present("force_tools_install"), skip_tools_install: matches.is_present("skip_tools_install"), + no_rustup_override: matches.is_present("no_rustup_override"), generate_child_script_on_failure: matches.is_present("generate_child_script_on_failure"), no_default_features: matches.is_present("no_default_features"), remap_cwd: !matches.is_present("remap_cwd"), diff --git a/sdk/feature-set/src/lib.rs b/sdk/feature-set/src/lib.rs index 04fe40e5190bc3..5239edcaeb95f7 100644 --- a/sdk/feature-set/src/lib.rs +++ b/sdk/feature-set/src/lib.rs @@ -614,10 +614,10 @@ pub mod delay_visibility_of_program_deployment { } pub mod apply_cost_tracker_during_replay { - solana_pubkey::declare_id!("B7H2caeia4ZFcpE3QcgMqbiWiBtWrdBRBSJ1DY6Ktxbq"); + solana_pubkey::declare_id!("2ry7ygxiYURULZCrypHhveanvP5tzZ4toRwVp89oCNSj"); } pub mod bpf_account_data_direct_mapping { - solana_pubkey::declare_id!("EenyoWx9UMXYKpR8mW5Jmfmy2fRjzUtM7NduYMY8bx33"); + solana_pubkey::declare_id!("GJVDwRkUPNdk9QaK4VsU4g1N41QNxhy1hevjf8kz45Mq"); } pub mod add_set_tx_loaded_accounts_data_size_instruction { @@ -860,14 +860,26 @@ pub mod deprecate_legacy_vote_ixs { solana_pubkey::declare_id!("depVvnQ2UysGrhwdiwU42tCadZL8GcBb1i2GYhMopQv"); } -pub mod disable_sbpf_v1_execution { +pub mod disable_sbpf_v0_execution { solana_pubkey::declare_id!("TestFeature11111111111111111111111111111111"); } -pub mod reenable_sbpf_v1_execution { +pub mod reenable_sbpf_v0_execution { solana_pubkey::declare_id!("TestFeature21111111111111111111111111111111"); } +pub mod enable_sbpf_v1_deployment_and_execution { + solana_pubkey::declare_id!("JE86WkYvTrzW8HgNmrHY7dFYpCmSptUpKupbo2AdQ9cG"); +} + +pub mod enable_sbpf_v2_deployment_and_execution { + solana_pubkey::declare_id!("F6UVKh1ujTEFK3en2SyAL3cdVnqko1FVEXWhmdLRu6WP"); +} + +pub mod enable_sbpf_v3_deployment_and_execution { + solana_pubkey::declare_id!("C8XZNs1bfzaiT3YDeXZJ7G5swQWQv7tVzDnCxtHvnSpw"); +} + pub mod remove_accounts_executable_flag_checks { solana_pubkey::declare_id!("FfgtauHUWKeXTzjXkua9Px4tNGBFHKZ9WaigM5VbbzFx"); } @@ -881,7 +893,7 @@ pub mod disable_account_loader_special_case { } pub mod enable_secp256r1_precompile { - solana_pubkey::declare_id!("sr11RdZWgbHTHxSroPALe6zgaT5A1K9LcE4nfsZS4gi"); + solana_pubkey::declare_id!("sryYyFwxzJop1Bh9XpyiVWjZP4nfHExiqNp3Dh71W9i"); } pub mod accounts_lt_hash { @@ -892,10 +904,18 @@ pub mod migrate_stake_program_to_core_bpf { solana_pubkey::declare_id!("6M4oQ6eXneVhtLoiAr4yRYQY43eVLjrKbiDZDJc892yk"); } +pub mod deplete_cu_meter_on_vm_failure { + solana_pubkey::declare_id!("B7H2caeia4ZFcpE3QcgMqbiWiBtWrdBRBSJ1DY6Ktxbq"); +} + pub mod reserve_minimal_cus_for_builtin_instructions { solana_pubkey::declare_id!("C9oAhLxDBm3ssWtJx1yBGzPY55r2rArHmN1pbQn6HogH"); } +pub mod raise_block_limits_to_50m { + solana_pubkey::declare_id!("5oMCU3JPaFLr8Zr4ct7yFA7jdk6Mw1RmB8K4u9ZbS42z"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: AHashMap = [ @@ -1106,15 +1126,20 @@ lazy_static! 
{ (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), (partitioned_epoch_rewards_superfeature::id(), "replaces enable_partitioned_epoch_reward to enable partitioned rewards at epoch boundary SIMD-0118"), - (disable_sbpf_v1_execution::id(), "Disables execution of SBPFv1 programs"), - (reenable_sbpf_v1_execution::id(), "Re-enables execution of SBPFv1 programs"), + (disable_sbpf_v0_execution::id(), "Disables execution of SBPFv1 programs SIMD-0161"), + (reenable_sbpf_v0_execution::id(), "Re-enables execution of SBPFv1 programs"), + (enable_sbpf_v1_deployment_and_execution::id(), "Enables deployment and execution of SBPFv1 programs SIMD-0161"), + (enable_sbpf_v2_deployment_and_execution::id(), "Enables deployment and execution of SBPFv2 programs SIMD-0161"), + (enable_sbpf_v3_deployment_and_execution::id(), "Enables deployment and execution of SBPFv3 programs SIMD-0161"), (remove_accounts_executable_flag_checks::id(), "Remove checks of accounts is_executable flag SIMD-0162"), (lift_cpi_caller_restriction::id(), "Lift the restriction in CPI that the caller must have the callee as an instruction account #2202"), (disable_account_loader_special_case::id(), "Disable account loader special case #3513"), (accounts_lt_hash::id(), "enables lattice-based accounts hash #3333"), (enable_secp256r1_precompile::id(), "Enable secp256r1 precompile SIMD-0075"), (migrate_stake_program_to_core_bpf::id(), "Migrate Stake program to Core BPF SIMD-0196 #3655"), + (deplete_cu_meter_on_vm_failure::id(), "Deplete compute meter for vm errors SIMD-0182 #3993"), (reserve_minimal_cus_for_builtin_instructions::id(), "Reserve minimal CUs for builtin instructions SIMD-170 #2562"), + (raise_block_limits_to_50m::id(), "Raise block limit to 50M SIMD-0207"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/packet/src/lib.rs b/sdk/packet/src/lib.rs index fe25e4ffa108f4..871f9050c41336 100644 --- a/sdk/packet/src/lib.rs +++ b/sdk/packet/src/lib.rs @@ -52,9 +52,10 @@ bitflags! { const FORWARDED = 0b0000_0010; const REPAIR = 0b0000_0100; const SIMPLE_VOTE_TX = 0b0000_1000; - const TRACER_PACKET = 0b0001_0000; // Previously used - this can now be re-used for something else. - const UNUSED = 0b0010_0000; + const UNUSED_0 = 0b0001_0000; + // Previously used - this can now be re-used for something else. + const UNUSED_1 = 0b0010_0000; /// For tracking performance const PERF_TRACK_PACKET = 0b0100_0000; /// For marking packets from staked nodes @@ -264,11 +265,6 @@ impl Meta { self.flags.set(PacketFlags::DISCARD, discard); } - #[inline] - pub fn set_tracer(&mut self, is_tracer: bool) { - self.flags.set(PacketFlags::TRACER_PACKET, is_tracer); - } - #[inline] pub fn set_track_performance(&mut self, is_performance_track: bool) { self.flags @@ -295,11 +291,6 @@ impl Meta { self.flags.contains(PacketFlags::SIMPLE_VOTE_TX) } - #[inline] - pub fn is_tracer_packet(&self) -> bool { - self.flags.contains(PacketFlags::TRACER_PACKET) - } - #[inline] pub fn is_perf_track_packet(&self) -> bool { self.flags.contains(PacketFlags::PERF_TRACK_PACKET) diff --git a/sdk/shred-version/Cargo.toml b/sdk/shred-version/Cargo.toml new file mode 100644 index 00000000000000..17b30e4389f5c4 --- /dev/null +++ b/sdk/shred-version/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "solana-shred-version" +description = "Calculation of shred versions." 
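The new `solana-shred-version` crate lifts `version_from_hash` (visible in the rename below) out of `solana-sdk`, so consumers no longer need the SDK's `full` feature. A minimal usage sketch, assuming only the signature shown in this diff:

```rust
use {solana_hash::Hash, solana_shred_version::version_from_hash};

fn main() {
    // On a real cluster this would be the genesis hash, optionally extended
    // with hard forks (hence the crate's solana-hard-forks dependency).
    let hash = Hash::default();
    let shred_version: u16 = version_from_hash(&hash);
    println!("shred version: {shred_version}");
}
```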
+documentation = "https://docs.rs/solana-shred-version" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +byteorder = { workspace = true } +solana-hard-forks = { workspace = true } +solana-hash = { workspace = true } +solana-sha256-hasher = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/src/shred_version.rs b/sdk/shred-version/src/lib.rs similarity index 94% rename from sdk/src/shred_version.rs rename to sdk/shred-version/src/lib.rs index 52560a1709c102..123e96465b8eff 100644 --- a/sdk/src/shred_version.rs +++ b/sdk/shred-version/src/lib.rs @@ -2,12 +2,7 @@ //! //! [shred]: https://solana.com/docs/terminology#shred -#![cfg(feature = "full")] - -use solana_sdk::{ - hard_forks::HardForks, - hash::{extend_and_hash, Hash}, -}; +use {solana_hard_forks::HardForks, solana_hash::Hash, solana_sha256_hasher::extend_and_hash}; pub fn version_from_hash(hash: &Hash) -> u16 { let hash = hash.as_ref(); diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 6d1371e2b53712..f9d1e89ce4f305 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -65,7 +65,6 @@ pub use solana_signer::signers; pub mod entrypoint; pub mod entrypoint_deprecated; pub mod example_mocks; -pub mod exit; pub mod feature; pub mod genesis_config; #[cfg(feature = "full")] @@ -89,7 +88,9 @@ pub mod reward_type { pub use solana_reward_info::RewardType; } pub mod rpc_port; -pub mod shred_version; +#[cfg(feature = "full")] +#[deprecated(since = "2.2.0", note = "Use `solana-shred-version` crate instead")] +pub use solana_shred_version as shred_version; pub mod signature; pub mod signer; pub mod transaction; @@ -231,6 +232,8 @@ pub use solana_transaction::simple_vote_transaction_checker; note = "Use `solana-transaction-context` crate instead" )] pub use solana_transaction_context as transaction_context; +#[deprecated(since = "2.2.0", note = "Use `solana-validator-exit` crate instead")] +pub use solana_validator_exit as exit; /// Convenience macro for `AddAssign` with saturating arithmetic. /// Replace by `std::num::Saturating` once stable diff --git a/sdk/validator-exit/Cargo.toml b/sdk/validator-exit/Cargo.toml new file mode 100644 index 00000000000000..f1b89a0794e6e5 --- /dev/null +++ b/sdk/validator-exit/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-validator-exit" +description = "Solana validator exit handling." 
+documentation = "https://docs.rs/solana-validator-exit" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/sdk/src/exit.rs b/sdk/validator-exit/src/lib.rs similarity index 100% rename from sdk/src/exit.rs rename to sdk/validator-exit/src/lib.rs diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 002a00bb12674a..d873dd77925ed4 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -1525,8 +1525,8 @@ pub mod test { nonblocking::{ quic::compute_max_allowed_uni_streams, testing_utilities::{ - get_client_config, make_client_endpoint, setup_quic_server, - SpawnTestServerResult, TestServerConfig, + check_multiple_streams, get_client_config, make_client_endpoint, + setup_quic_server, SpawnTestServerResult, TestServerConfig, }, }, quic::DEFAULT_TPU_COALESCE, @@ -1589,48 +1589,6 @@ pub mod test { } } - pub async fn check_multiple_streams( - receiver: Receiver, - server_address: SocketAddr, - ) { - let conn1 = Arc::new(make_client_endpoint(&server_address, None).await); - let conn2 = Arc::new(make_client_endpoint(&server_address, None).await); - let mut num_expected_packets = 0; - for i in 0..10 { - info!("sending: {}", i); - let c1 = conn1.clone(); - let c2 = conn2.clone(); - let mut s1 = c1.open_uni().await.unwrap(); - let mut s2 = c2.open_uni().await.unwrap(); - s1.write_all(&[0u8]).await.unwrap(); - s1.finish().unwrap(); - s2.write_all(&[0u8]).await.unwrap(); - s2.finish().unwrap(); - num_expected_packets += 2; - sleep(Duration::from_millis(200)).await; - } - let mut all_packets = vec![]; - let now = Instant::now(); - let mut total_packets = 0; - while now.elapsed().as_secs() < 10 { - if let Ok(packets) = receiver.try_recv() { - total_packets += packets.len(); - all_packets.push(packets) - } else { - sleep(Duration::from_secs(1)).await; - } - if total_packets == num_expected_packets { - break; - } - } - for batch in all_packets { - for p in batch.iter() { - assert_eq!(p.meta().size, 1); - } - } - assert_eq!(total_packets, num_expected_packets); - } - pub async fn check_multiple_writes( receiver: Receiver, server_address: SocketAddr, @@ -2049,7 +2007,7 @@ pub mod test { ) .unwrap(); - check_multiple_streams(receiver, server_address).await; + check_multiple_streams(receiver, server_address, None).await; assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0); assert_eq!(stats.total_new_streams.load(Ordering::Relaxed), 20); assert_eq!(stats.total_connections.load(Ordering::Relaxed), 2); diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index 80c758ca8957af..78adfd3171b52c 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -12,7 +12,7 @@ use { }, streamer::StakedNodes, }, - crossbeam_channel::unbounded, + crossbeam_channel::{unbounded, Receiver}, quinn::{ crypto::rustls::QuicClientConfig, ClientConfig, Connection, EndpointConfig, IdleTimeout, TokioRuntime, TransportConfig, @@ -21,20 +21,19 @@ use { solana_net_utils::bind_to_localhost, solana_perf::packet::PacketBatch, solana_quic_definitions::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT}, - solana_tls_utils::{new_dummy_x509_certificate, SkipServerVerification}, + solana_tls_utils::{new_dummy_x509_certificate, 
tls_client_config_builder}, std::{ net::{SocketAddr, UdpSocket}, sync::{atomic::AtomicBool, Arc, RwLock}, + time::{Duration, Instant}, }, - tokio::task::JoinHandle, + tokio::{task::JoinHandle, time::sleep}, }; pub fn get_client_config(keypair: &Keypair) -> ClientConfig { let (cert, key) = new_dummy_x509_certificate(keypair); - let mut crypto = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(SkipServerVerification::new()) + let mut crypto = tls_client_config_builder() .with_client_auth_cert(vec![cert], key) .expect("Failed to use client certificate"); @@ -81,33 +80,35 @@ pub struct SpawnTestServerResult { pub stats: Arc, } +pub fn create_quic_server_sockets() -> Vec { + #[cfg(not(target_os = "windows"))] + { + use { + solana_net_utils::bind_to, + std::net::{IpAddr, Ipv4Addr}, + }; + (0..10) + .map(|_| { + bind_to( + IpAddr::V4(Ipv4Addr::LOCALHOST), + /*port*/ 0, + /*reuseport:*/ true, + ) + .unwrap() + }) + .collect::>() + } + #[cfg(target_os = "windows")] + { + vec![bind_to_localhost().unwrap()] + } +} + pub fn setup_quic_server( option_staked_nodes: Option, config: TestServerConfig, ) -> SpawnTestServerResult { - let sockets = { - #[cfg(not(target_os = "windows"))] - { - use { - solana_net_utils::bind_to, - std::net::{IpAddr, Ipv4Addr}, - }; - (0..10) - .map(|_| { - bind_to( - IpAddr::V4(Ipv4Addr::LOCALHOST), - /*port*/ 0, - /*reuseport:*/ true, - ) - .unwrap() - }) - .collect::>() - } - #[cfg(target_os = "windows")] - { - vec![bind_to_localhost().unwrap()] - } - }; + let sockets = create_quic_server_sockets(); setup_quic_server_with_sockets(sockets, option_staked_nodes, config) } @@ -182,3 +183,46 @@ pub async fn make_client_endpoint( .await .expect("Test server should be already listening on 'localhost'") } + +pub async fn check_multiple_streams( + receiver: Receiver, + server_address: SocketAddr, + client_keypair: Option<&Keypair>, +) { + let conn1 = Arc::new(make_client_endpoint(&server_address, client_keypair).await); + let conn2 = Arc::new(make_client_endpoint(&server_address, client_keypair).await); + let mut num_expected_packets = 0; + for i in 0..10 { + info!("sending: {}", i); + let c1 = conn1.clone(); + let c2 = conn2.clone(); + let mut s1 = c1.open_uni().await.unwrap(); + let mut s2 = c2.open_uni().await.unwrap(); + s1.write_all(&[0u8]).await.unwrap(); + s1.finish().unwrap(); + s2.write_all(&[0u8]).await.unwrap(); + s2.finish().unwrap(); + num_expected_packets += 2; + sleep(Duration::from_millis(200)).await; + } + let mut all_packets = vec![]; + let now = Instant::now(); + let mut total_packets = 0; + while now.elapsed().as_secs() < 10 { + if let Ok(packets) = receiver.try_recv() { + total_packets += packets.len(); + all_packets.push(packets) + } else { + sleep(Duration::from_secs(1)).await; + } + if total_packets == num_expected_packets { + break; + } + } + for batch in all_packets { + for p in batch.iter() { + assert_eq!(p.meta().size, 1); + } + } + assert_eq!(total_packets, num_expected_packets); +} diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 1914861047dc43..4a290d17c43714 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -19,7 +19,7 @@ use { solana_quic_definitions::{ NotifyKeyUpdate, QUIC_MAX_TIMEOUT, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, }, - solana_tls_utils::{new_dummy_x509_certificate, SkipClientVerification}, + solana_tls_utils::{new_dummy_x509_certificate, tls_server_config_builder}, std::{ net::UdpSocket, sync::{ @@ -58,9 +58,8 @@ pub(crate) fn configure_server( }]; let cert_chain_pem = 
pem::encode_many(&cert_chain_pem_parts); - let mut server_tls_config = rustls::ServerConfig::builder() - .with_client_cert_verifier(SkipClientVerification::new()) - .with_single_cert(vec![cert], priv_key)?; + let mut server_tls_config = + tls_server_config_builder().with_single_cert(vec![cert], priv_key)?; server_tls_config.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()]; server_tls_config.key_log = Arc::new(KeyLogFile::new()); let quic_server_config = QuicServerConfig::try_from(server_tls_config)?; @@ -565,6 +564,7 @@ pub fn spawn_server( ) } +#[derive(Clone)] pub struct QuicServerParams { pub max_connections_per_peer: usize, pub max_staked_connections: usize, @@ -633,8 +633,11 @@ pub fn spawn_server_multi( #[cfg(test)] mod test { use { - super::*, crate::nonblocking::quic::test::*, crossbeam_channel::unbounded, - solana_net_utils::bind_to_localhost, std::net::SocketAddr, + super::*, + crate::nonblocking::{quic::test::*, testing_utilities::check_multiple_streams}, + crossbeam_channel::unbounded, + solana_net_utils::bind_to_localhost, + std::net::SocketAddr, }; fn setup_quic_server() -> ( @@ -724,7 +727,7 @@ mod test { .unwrap(); let runtime = rt("solQuicTestRt".to_string()); - runtime.block_on(check_multiple_streams(receiver, server_address)); + runtime.block_on(check_multiple_streams(receiver, server_address, None)); exit.store(true, Ordering::Relaxed); t.join().unwrap(); } diff --git a/streamer/src/socket.rs b/streamer/src/socket.rs index fe86f84319d4d4..d9bd0966e3ae3a 100644 --- a/streamer/src/socket.rs +++ b/streamer/src/socket.rs @@ -16,6 +16,7 @@ impl SocketAddrSpace { } /// Returns true if the IP address is valid. + #[inline] #[must_use] pub fn check(&self, addr: &SocketAddr) -> bool { if matches!(self, SocketAddrSpace::Unspecified) { diff --git a/svm/Cargo.toml b/svm/Cargo.toml index e3ed11e14df0f0..7d6fb3c6071938 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -17,26 +17,42 @@ percentage = { workspace = true } qualifier_attr = { workspace = true, optional = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } +solana-account = { workspace = true } solana-bpf-loader-program = { workspace = true } +solana-clock = { workspace = true } solana-compute-budget = { workspace = true } solana-compute-budget-instruction = { workspace = true } solana-feature-set = { workspace = true } solana-fee = { workspace = true } +solana-fee-structure = { workspace = true } solana-frozen-abi = { workspace = true, optional = true, features = [ "frozen-abi", ] } solana-frozen-abi-macro = { workspace = true, optional = true, features = [ "frozen-abi", ] } +solana-hash = { workspace = true } +solana-instruction = { workspace = true, features = ["std"] } +solana-instructions-sysvar = { workspace = true } solana-loader-v4-program = { workspace = true } solana-log-collector = { workspace = true } solana-measure = { workspace = true } +solana-message = { workspace = true } +solana-nonce = { workspace = true } +solana-precompiles = { workspace = true } +solana-program = { workspace = true, default-features = false } solana-program-runtime = { workspace = true } +solana-pubkey = { workspace = true } +solana-rent = { workspace = true } +solana-rent-debits = { workspace = true } solana-sdk = { workspace = true } +solana-sdk-ids = { workspace = true } solana-svm-rent-collector = { workspace = true } solana-svm-transaction = { workspace = true } solana-system-program = { workspace = true } solana-timings = { workspace = true } +solana-transaction-context = { workspace = 
true } +solana-transaction-error = { workspace = true } solana-type-overrides = { workspace = true } thiserror = { workspace = true } @@ -54,15 +70,30 @@ openssl = { workspace = true } prost = { workspace = true } rand0-7 = { workspace = true } shuttle = { workspace = true } +solana-clock = { workspace = true } +solana-compute-budget-interface = { workspace = true } solana-compute-budget-program = { workspace = true } solana-ed25519-program = { workspace = true } +solana-epoch-schedule = { workspace = true } +solana-fee-calculator = { workspace = true } +solana-keypair = { workspace = true } solana-logger = { workspace = true } +solana-native-token = { workspace = true } +solana-pubkey = { workspace = true } +solana-rent = { workspace = true } +solana-reserved-account-keys = { workspace = true } +solana-sbpf = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +solana-secp256k1-program = { workspace = true } solana-secp256r1-program = { workspace = true, features = ["openssl-vendored"] } +solana-signature = { workspace = true } +solana-signer = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-svm = { path = ".", features = ["dev-context-only-utils"] } solana-svm-conformance = { workspace = true } -solana_rbpf = { workspace = true } +solana-system-transaction = { workspace = true } +solana-sysvar = { workspace = true } +solana-transaction = { workspace = true } test-case = { workspace = true } [package.metadata.docs.rs] diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index c2bb2ac2bb5501..40c6b19631cffc 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ -70,7 +70,7 @@ dependencies = [ "log", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -162,9 +162,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "aquamarine" @@ -770,18 +770,18 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" +checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", @@ -980,15 +980,15 @@ dependencies = [ [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width 0.1.14", - "windows-sys 0.52.0", + "once_cell", + "unicode-width 0.2.0", + "windows-sys 0.59.0", ] [[package]] @@ -1071,9 +1071,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] @@ -1473,9 +1473,9 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" @@ -2132,9 +2132,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -2759,9 +2759,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -3869,9 +3869,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -3887,11 +3887,11 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.0.0", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.6", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -4282,9 +4282,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring", @@ -4345,7 +4345,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -4418,12 +4418,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "scroll" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" - [[package]] name = "sct" version = "0.7.1" @@ -4460,9 +4454,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "seqlock" @@ -4475,9 +4469,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -4502,9 +4496,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", @@ -4779,7 +4773,7 @@ dependencies = [ "spl-token-2022", "spl-token-group-interface", "spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -4849,7 +4843,7 @@ dependencies = [ "static_assertions", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -4866,7 +4860,7 @@ dependencies = [ "solana-program", "solana-program-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -4886,7 +4880,7 @@ dependencies = [ "solana-program", "solana-sdk", "tarpc", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-serde", ] @@ -4954,7 +4948,7 @@ dependencies = [ "ark-serialize", "bytemuck", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -4983,11 +4977,11 @@ dependencies = [ "solana-poseidon", "solana-program-memory", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-timings", "solana-type-overrides", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5069,7 +5063,7 @@ dependencies = [ "solana-seed-phrase", "solana-signature", "solana-signer", - "thiserror 2.0.6", + "thiserror 2.0.9", "tiny-bip39", "uriparse", "url 2.5.4", @@ -5154,7 +5148,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-udp-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5223,7 +5217,7 @@ dependencies = [ "solana-pubkey", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5276,7 +5270,7 @@ dependencies = [ "solana-metrics", "solana-time-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5310,7 +5304,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.19", + "rustls 0.23.20", "serde", "serde_bytes", "serde_derive", @@ -5363,7 +5357,7 @@ dependencies = [ "sys-info", "sysctl", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "trees", ] @@ -5406,7 +5400,7 @@ dependencies = [ "bytemuck_derive", "curve25519-dalek 4.1.3", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5535,7 +5529,7 @@ dependencies = [ "solana-transaction", "solana-version", "spl-memo", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5615,7 +5609,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -5638,8 +5632,10 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "serde", + "serde-big-array", "serde_bytes", "serde_derive", + "siphasher", "solana-bloom", "solana-clap-utils", "solana-client", @@ -5665,7 +5661,7 @@ dependencies = [ "solana-vote", "solana-vote-program", "static_assertions", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -5854,7 +5850,7 @@ dependencies = [ "strum_macros", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "trees", @@ -5870,9 +5866,9 @@ dependencies = [ "solana-log-collector", 
"solana-measure", "solana-program-runtime", + "solana-sbpf", "solana-sdk", "solana-type-overrides", - "solana_rbpf", ] [[package]] @@ -5938,7 +5934,7 @@ dependencies = [ "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6066,7 +6062,7 @@ dependencies = [ "solana-metrics", "solana-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6084,7 +6080,7 @@ dependencies = [ "ark-bn254", "light-poseidon", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6184,7 +6180,7 @@ dependencies = [ "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "thiserror 2.0.6", + "thiserror 2.0.9", "wasm-bindgen", ] @@ -6239,7 +6235,6 @@ dependencies = [ "bincode", "enum-iterator", "itertools 0.12.1", - "libc", "log", "num-derive", "num-traits", @@ -6261,6 +6256,7 @@ dependencies = [ "solana-precompiles", "solana-pubkey", "solana-rent", + "solana-sbpf", "solana-sdk-ids", "solana-slot-hashes", "solana-stable-layout", @@ -6269,8 +6265,7 @@ dependencies = [ "solana-timings", "solana-transaction-context", "solana-type-overrides", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6298,12 +6293,12 @@ dependencies = [ "solana-logger", "solana-program-runtime", "solana-runtime", + "solana-sbpf", "solana-sdk", "solana-svm", "solana-timings", "solana-vote-program", - "solana_rbpf", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6349,7 +6344,7 @@ dependencies = [ "solana-pubkey", "solana-rpc-client-api", "solana-signature", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-tungstenite", @@ -6369,7 +6364,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.19", + "rustls 0.23.20", "solana-connection-cache", "solana-keypair", "solana-measure", @@ -6382,7 +6377,7 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -6419,7 +6414,7 @@ dependencies = [ "solana-pubkey", "solana-signature", "solana-signer", - "thiserror 2.0.6", + "thiserror 2.0.9", "uriparse", ] @@ -6514,7 +6509,7 @@ dependencies = [ "spl-token", "spl-token-2022", "stream-cancel", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.12", ] @@ -6581,7 +6576,7 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "solana-version", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6596,7 +6591,7 @@ dependencies = [ "solana-pubkey", "solana-rpc-client", "solana-sdk-ids", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6676,7 +6671,7 @@ dependencies = [ "symlink", "tar", "tempfile", - "thiserror 2.0.6", + "thiserror 2.0.9", "zstd", ] @@ -6692,13 +6687,30 @@ dependencies = [ "solana-sdk", "solana-sdk-ids", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] name = "solana-sanitize" version = "2.2.0" +[[package]] +name = "solana-sbpf" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92b4c060a707fdb0754a876cbbf49591b60a573b5521b485125d2a4d6ff68ce3" +dependencies = [ + "byteorder", + "combine 3.8.1", + "hash32", + "libc", + "log", + "rand 0.8.5", + "rustc-demangle", + "thiserror 1.0.69", + "winapi 0.3.9", +] + [[package]] name = "solana-sdk" version = "2.2.0" @@ -6774,6 +6786,7 @@ dependencies = [ "solana-serde", "solana-serde-varint", "solana-short-vec", + "solana-shred-version", "solana-signature", 
"solana-signer", "solana-system-transaction", @@ -6781,7 +6794,8 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.6", + "solana-validator-exit", + "thiserror 2.0.9", "wasm-bindgen", ] @@ -6825,7 +6839,7 @@ dependencies = [ "borsh 1.5.3", "libsecp256k1", "solana-define-syscall", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -6921,6 +6935,16 @@ dependencies = [ "serde", ] +[[package]] +name = "solana-shred-version" +version = "2.2.0" +dependencies = [ + "byteorder", + "solana-hard-forks", + "solana-hash", + "solana-sha256-hasher", +] + [[package]] name = "solana-signature" version = "2.2.0" @@ -7014,7 +7038,7 @@ dependencies = [ "solana-sdk", "solana-storage-proto", "solana-transaction-status", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tonic", "zstd", @@ -7057,7 +7081,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.19", + "rustls 0.23.20", "smallvec", "socket2", "solana-keypair", @@ -7074,7 +7098,7 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "solana-transaction-metrics-tracker", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.12", "x509-parser", @@ -7090,22 +7114,38 @@ dependencies = [ "percentage", "serde", "serde_derive", + "solana-account", "solana-bpf-loader-program", + "solana-clock", "solana-compute-budget", "solana-compute-budget-instruction", "solana-feature-set", "solana-fee", + "solana-fee-structure", + "solana-hash", + "solana-instruction", + "solana-instructions-sysvar", "solana-loader-v4-program", "solana-log-collector", "solana-measure", + "solana-message", + "solana-nonce", + "solana-precompiles", + "solana-program", "solana-program-runtime", + "solana-pubkey", + "solana-rent", + "solana-rent-debits", "solana-sdk", + "solana-sdk-ids", "solana-svm-rent-collector", "solana-svm-transaction", "solana-system-program", "solana-timings", + "solana-transaction-context", + "solana-transaction-error", "solana-type-overrides", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7314,7 +7354,7 @@ dependencies = [ name = "solana-tls-utils" version = "2.2.0" dependencies = [ - "rustls 0.23.19", + "rustls 0.23.20", "solana-keypair", "solana-pubkey", "solana-signer", @@ -7349,7 +7389,7 @@ dependencies = [ "solana-signer", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -7361,7 +7401,7 @@ dependencies = [ "log", "lru", "quinn", - "rustls 0.23.19", + "rustls 0.23.20", "solana-clock", "solana-connection-cache", "solana-keypair", @@ -7373,7 +7413,7 @@ dependencies = [ "solana-time-utils", "solana-tls-utils", "solana-tpu-client", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tokio-util 0.7.12", ] @@ -7466,7 +7506,7 @@ dependencies = [ "spl-token-2022", "spl-token-group-interface", "spl-token-metadata-interface", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7487,7 +7527,7 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7506,7 +7546,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.19", + "rustls 0.23.20", "solana-entry", "solana-feature-set", "solana-geyser-plugin-manager", @@ -7526,7 +7566,7 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "static_assertions", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -7548,7 +7588,7 @@ dependencies = [ "solana-net-utils", "solana-streamer", 
"solana-transaction-error", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", ] @@ -7583,6 +7623,10 @@ dependencies = [ "vec_extract_if_polyfill", ] +[[package]] +name = "solana-validator-exit" +version = "2.2.0" + [[package]] name = "solana-version" version = "2.2.0" @@ -7605,7 +7649,7 @@ dependencies = [ "serde_derive", "solana-sdk", "solana-svm-transaction", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7623,7 +7667,7 @@ dependencies = [ "solana-program", "solana-program-runtime", "solana-sdk", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -7654,9 +7698,10 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-sdk", ] @@ -7690,7 +7735,7 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "wasm-bindgen", "zeroize", ] @@ -7703,9 +7748,10 @@ dependencies = [ "num-derive", "num-traits", "solana-feature-set", + "solana-instruction", "solana-log-collector", "solana-program-runtime", - "solana-sdk", + "solana-sdk-ids", "solana-zk-token-sdk", ] @@ -7740,28 +7786,10 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.6", + "thiserror 2.0.9", "zeroize", ] -[[package]] -name = "solana_rbpf" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c1941b5ef0c3ce8f2ac5dd984d0fb1a97423c4ff2a02eec81e3913f02e2ac2b" -dependencies = [ - "byteorder", - "combine 3.8.1", - "hash32", - "libc", - "log", - "rand 0.8.5", - "rustc-demangle", - "scroll", - "thiserror 1.0.69", - "winapi 0.3.9", -] - [[package]] name = "spin" version = "0.9.8" @@ -8363,11 +8391,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -8383,9 +8411,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", diff --git a/svm/examples/json-rpc/server/src/rpc_process.rs b/svm/examples/json-rpc/server/src/rpc_process.rs index 280496f0903225..c73b1b1f44e265 100644 --- a/svm/examples/json-rpc/server/src/rpc_process.rs +++ b/svm/examples/json-rpc/server/src/rpc_process.rs @@ -582,7 +582,7 @@ impl JsonRpcRequestProcessor { processed_counts.processed_with_successful_result_count += 1; } Err(err) => { - if *err_count == 0 { + if err_count.0 == 0 { debug!("tx error: {:?} {:?}", err, tx); } *err_count += 1; diff --git a/svm/examples/json-rpc/server/src/svm_bridge.rs b/svm/examples/json-rpc/server/src/svm_bridge.rs index c136af791c08f0..d0d6371df1d882 100644 --- a/svm/examples/json-rpc/server/src/svm_bridge.rs +++ b/svm/examples/json-rpc/server/src/svm_bridge.rs @@ -12,8 +12,8 @@ use { BlockRelation, ForkGraph, LoadProgramMetrics, ProgramCacheEntry, ProgramRuntimeEnvironments, }, - solana_rbpf::{ - program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + solana_sbpf::{ + program::{BuiltinFunction, BuiltinProgram, 
FunctionRegistry, SBPFVersion}, vm::Config, }, }, @@ -154,10 +154,7 @@ pub fn create_custom_environment<'a>() -> BuiltinProgram> { reject_broken_elfs: true, noop_instruction_rate: 256, sanitize_user_provided_values: true, - external_internal_function_hash_collision: false, - reject_callx_r10: false, - enable_sbpf_v1: true, - enable_sbpf_v2: false, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, optimize_rodata: false, aligned_memory_mapping: true, }; diff --git a/svm/examples/paytube/src/log.rs b/svm/examples/paytube/src/log.rs index 92c75573080741..040ef237b55284 100644 --- a/svm/examples/paytube/src/log.rs +++ b/svm/examples/paytube/src/log.rs @@ -19,7 +19,7 @@ fn log_magenta(msg: &str) { pub(crate) fn setup_solana_logging() { #[rustfmt::skip] solana_logger::setup_with_default( - "solana_rbpf::vm=debug,\ + "solana_sbpf::vm=debug,\ solana_runtime::message_processor=debug,\ solana_runtime::system_instruction_processor=trace", ); diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index bff6b406e9fa20..36db2aaa75a6d3 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -8,31 +8,34 @@ use { transaction_processing_callback::{AccountState, TransactionProcessingCallback}, }, ahash::{AHashMap, AHashSet}, + solana_account::{ + Account, AccountSharedData, ReadableAccount, WritableAccount, PROGRAM_OWNERS, + }, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_feature_set::{self as feature_set, FeatureSet}, + solana_fee_structure::FeeDetails, + solana_instruction::{BorrowedAccountMeta, BorrowedInstruction}, + solana_instructions_sysvar::construct_instructions_data, + solana_nonce::state::State as NonceState, solana_program_runtime::loaded_programs::ProgramCacheForTxBatch, - solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount, WritableAccount, PROGRAM_OWNERS}, - fee::FeeDetails, + solana_pubkey::Pubkey, + solana_rent::RentDue, + solana_rent_debits::RentDebits, + solana_sdk::rent_collector::{CollectedInfo, RENT_EXEMPT_RENT_EPOCH}, + solana_sdk_ids::{ native_loader, - nonce::State as NonceState, - pubkey::Pubkey, - rent::RentDue, - rent_collector::{CollectedInfo, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, - saturating_add_assign, - sysvar::{ - self, - instructions::{construct_instructions_data, BorrowedAccountMeta, BorrowedInstruction}, - slot_history, - }, - transaction::{Result, TransactionError}, - transaction_context::{IndexOfAccount, TransactionAccount}, + sysvar::{self, slot_history}, }, solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_svm_transaction::svm_message::SVMMessage, solana_system_program::{get_system_account_kind, SystemAccountKind}, - std::{collections::HashMap, num::NonZeroU32, sync::Arc}, + solana_transaction_context::{IndexOfAccount, TransactionAccount}, + solana_transaction_error::{TransactionError, TransactionResult as Result}, + std::{ + collections::HashMap, + num::{NonZeroU32, Saturating}, + sync::Arc, + }, }; // for the load instructions @@ -431,7 +434,7 @@ fn load_transaction_accounts( let mut accounts = Vec::with_capacity(account_keys.len()); let mut validated_loaders = AHashSet::with_capacity(PROGRAM_OWNERS.len()); let mut rent_debits = RentDebits::default(); - let mut accumulated_accounts_data_size: u32 = 0; + let mut accumulated_accounts_data_size: Saturating = Saturating(0); let mut collect_loaded_account = |key, loaded_account| -> Result<()> { let LoadedTransactionAccount { @@ -564,7 +567,7 @@ fn load_transaction_accounts( program_indices, rent: 
tx_rent, rent_debits, - loaded_accounts_data_size: accumulated_accounts_data_size, + loaded_accounts_data_size: accumulated_accounts_data_size.0, }) } @@ -577,7 +580,7 @@ fn load_transaction_account( ) -> LoadedTransactionAccount { let usage_pattern = AccountUsagePattern::new(message, account_index); - let loaded_account = if solana_sdk::sysvar::instructions::check_id(account_key) { + let loaded_account = if solana_sdk_ids::sysvar::instructions::check_id(account_key) { // Since the instructions sysvar is constructed by the SVM and modified // for each transaction instruction, it cannot be loaded. LoadedTransactionAccount { @@ -637,7 +640,7 @@ fn account_shared_data_from_program( /// `accumulated_accounts_data_size` exceeds /// `requested_loaded_accounts_data_size_limit`. fn accumulate_and_check_loaded_account_data_size( - accumulated_loaded_accounts_data_size: &mut u32, + accumulated_loaded_accounts_data_size: &mut Saturating, account_data_size: usize, requested_loaded_accounts_data_size_limit: NonZeroU32, error_metrics: &mut TransactionErrorMetrics, @@ -646,8 +649,8 @@ fn accumulate_and_check_loaded_account_data_size( error_metrics.max_loaded_accounts_data_size_exceeded += 1; return Err(TransactionError::MaxLoadedAccountsDataSizeExceeded); }; - saturating_add_assign!(*accumulated_loaded_accounts_data_size, account_data_size); - if *accumulated_loaded_accounts_data_size > requested_loaded_accounts_data_size_limit.get() { + *accumulated_loaded_accounts_data_size += account_data_size; + if accumulated_loaded_accounts_data_size.0 > requested_loaded_accounts_data_size_limit.get() { error_metrics.max_loaded_accounts_data_size_exceeded += 1; Err(TransactionError::MaxLoadedAccountsDataSizeExceeded) } else { @@ -694,38 +697,40 @@ mod tests { transaction_account_state_info::TransactionAccountStateInfo, transaction_processing_callback::TransactionProcessingCallback, }, - nonce::state::Versions as NonceVersions, + solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, solana_compute_budget::{compute_budget::ComputeBudget, compute_budget_limits}, + solana_epoch_schedule::EpochSchedule, solana_feature_set::FeatureSet, + solana_hash::Hash, + solana_instruction::{AccountMeta, Instruction}, + solana_keypair::Keypair, + solana_message::{ + compiled_instruction::CompiledInstruction, + v0::{LoadedAddresses, LoadedMessage}, + LegacyMessage, Message, MessageHeader, SanitizedMessage, + }, + solana_native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + solana_nonce::{self as nonce, versions::Versions as NonceVersions}, + solana_program::bpf_loader_upgradeable::UpgradeableLoaderState, solana_program_runtime::loaded_programs::{ ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, ProgramCacheForTxBatch, }, - solana_rbpf::program::BuiltinProgram, - solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, - bpf_loader, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - epoch_schedule::EpochSchedule, - hash::Hash, - instruction::{AccountMeta, CompiledInstruction, Instruction}, - message::{ - v0::{LoadedAddresses, LoadedMessage}, - LegacyMessage, Message, MessageHeader, SanitizedMessage, - }, - native_loader, - native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, - nonce, - pubkey::Pubkey, - rent::Rent, - rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, - reserved_account_keys::ReservedAccountKeys, - signature::{Keypair, Signature, Signer}, - system_program, system_transaction, sysvar, - transaction::{Result, 
SanitizedTransaction, Transaction, TransactionError}, - transaction_context::{TransactionAccount, TransactionContext}, + solana_pubkey::Pubkey, + solana_rent::Rent, + solana_rent_debits::RentDebits, + solana_reserved_account_keys::ReservedAccountKeys, + solana_sbpf::program::BuiltinProgram, + solana_sdk::rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, + solana_sdk_ids::{ + bpf_loader, bpf_loader_upgradeable, native_loader, system_program, sysvar, }, + solana_signature::Signature, + solana_signer::Signer, + solana_system_transaction::transfer, + solana_transaction::{sanitized::SanitizedTransaction, Transaction}, + solana_transaction_context::{TransactionAccount, TransactionContext}, + solana_transaction_error::{TransactionError, TransactionResult as Result}, std::{borrow::Cow, cell::RefCell, collections::HashMap, fs::File, io::Read, sync::Arc}, }; @@ -906,7 +911,7 @@ mod tests { let load_results = load_accounts_aux_test(tx, &accounts, &mut error_metrics); - assert_eq!(error_metrics.account_not_found, 1); + assert_eq!(error_metrics.account_not_found.0, 1); assert!(matches!( load_results, TransactionLoadResult::FeesOnly(FeesOnlyTransaction { @@ -945,7 +950,7 @@ mod tests { let loaded_accounts = load_accounts_with_excluded_features(tx, &accounts, &mut error_metrics, None); - assert_eq!(error_metrics.account_not_found, 0); + assert_eq!(error_metrics.account_not_found.0, 0); match &loaded_accounts { TransactionLoadResult::Loaded(loaded_transaction) => { assert_eq!(loaded_transaction.accounts.len(), 3); @@ -986,7 +991,7 @@ mod tests { let load_results = load_accounts_aux_test(tx, &accounts, &mut error_metrics); - assert_eq!(error_metrics.account_not_found, 1); + assert_eq!(error_metrics.account_not_found.0, 1); assert!(matches!( load_results, TransactionLoadResult::FeesOnly(FeesOnlyTransaction { @@ -1030,7 +1035,7 @@ mod tests { &mut feature_set, ); - assert_eq!(error_metrics.invalid_program_for_execution, 1); + assert_eq!(error_metrics.invalid_program_for_execution.0, 1); assert!(matches!( load_results, TransactionLoadResult::FeesOnly(FeesOnlyTransaction { @@ -1081,7 +1086,7 @@ mod tests { let loaded_accounts = load_accounts_with_excluded_features(tx, &accounts, &mut error_metrics, None); - assert_eq!(error_metrics.account_not_found, 0); + assert_eq!(error_metrics.account_not_found.0, 0); match &loaded_accounts { TransactionLoadResult::Loaded(loaded_transaction) => { assert_eq!(loaded_transaction.accounts.len(), 3); @@ -1131,12 +1136,12 @@ mod tests { #[test] fn test_instructions() { solana_logger::setup(); - let instructions_key = solana_sdk::sysvar::instructions::id(); + let instructions_key = solana_sdk_ids::sysvar::instructions::id(); let keypair = Keypair::new(); let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])]; let tx = Transaction::new_with_compiled_instructions( &[&keypair], - &[solana_sdk::pubkey::new_rand(), instructions_key], + &[solana_pubkey::new_rand(), instructions_key], Hash::default(), vec![native_loader::id()], instructions, @@ -1188,7 +1193,7 @@ mod tests { #[test] fn test_accumulate_and_check_loaded_account_data_size() { let mut error_metrics = TransactionErrorMetrics::default(); - let mut accumulated_data_size: u32 = 0; + let mut accumulated_data_size: Saturating = Saturating(0); let data_size: usize = 123; let requested_data_size_limit = NonZeroU32::new(data_size as u32).unwrap(); @@ -1200,7 +1205,7 @@ mod tests { &mut error_metrics ) .is_ok()); - assert_eq!(data_size as u32, accumulated_data_size); + assert_eq!(data_size as u32, 
accumulated_data_size.0); // fail - loading more data that would exceed limit let another_byte: usize = 1; @@ -1361,7 +1366,7 @@ mod tests { #[test] fn test_construct_instructions_account() { let loaded_message = LoadedMessage { - message: Cow::Owned(solana_sdk::message::v0::Message::default()), + message: Cow::Owned(solana_message::v0::Message::default()), loaded_addresses: Cow::Owned(LoadedAddresses::default()), is_writable_account_cache: vec![false], }; @@ -2092,7 +2097,7 @@ mod tests { .insert(recipient, AccountSharedData::default()); let mut account_loader = (&bank).into(); - let tx = system_transaction::transfer( + let tx = transfer( &mint_keypair, &recipient, sol_to_lamports(1.), @@ -2468,7 +2473,7 @@ mod tests { let program1 = program1_keypair.pubkey(); let program2 = Pubkey::new_unique(); let programdata2 = Pubkey::new_unique(); - use solana_sdk::account_utils::StateMut; + use solana_account::state_traits::StateMut; let program2_size = std::mem::size_of::() as u32; let mut program2_account = AccountSharedData::default(); diff --git a/svm/src/account_overrides.rs b/svm/src/account_overrides.rs index c31dc5bac16eec..7381852965f06d 100644 --- a/svm/src/account_overrides.rs +++ b/svm/src/account_overrides.rs @@ -1,5 +1,5 @@ use { - solana_sdk::{account::AccountSharedData, pubkey::Pubkey, sysvar}, + solana_account::AccountSharedData, solana_pubkey::Pubkey, solana_sdk_ids::sysvar, std::collections::HashMap, }; @@ -36,8 +36,8 @@ impl AccountOverrides { #[cfg(test)] mod test { use { - crate::account_overrides::AccountOverrides, - solana_sdk::{account::AccountSharedData, pubkey::Pubkey, sysvar}, + crate::account_overrides::AccountOverrides, solana_account::AccountSharedData, + solana_pubkey::Pubkey, solana_sdk_ids::sysvar, }; #[test] diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs index ad4d1723baf4e2..99c0fa5206d05e 100644 --- a/svm/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -1,15 +1,13 @@ use { + solana_account::WritableAccount, + solana_instructions_sysvar as instructions, solana_measure::measure_us, + solana_precompiles::get_precompile, solana_program_runtime::invoke_context::InvokeContext, - solana_sdk::{ - account::WritableAccount, - precompiles::get_precompile, - sysvar::instructions, - transaction::TransactionError, - transaction_context::{IndexOfAccount, InstructionAccount}, - }, solana_svm_transaction::svm_message::SVMMessage, solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, + solana_transaction_context::{IndexOfAccount, InstructionAccount}, + solana_transaction_error::TransactionError, }; #[derive(Debug, Default, Clone, serde_derive::Deserialize, serde_derive::Serialize)] @@ -138,30 +136,27 @@ mod tests { nid::Nid, }, rand0_7::thread_rng, + solana_account::{AccountSharedData, ReadableAccount}, solana_compute_budget::compute_budget::ComputeBudget, solana_ed25519_program::new_ed25519_instruction, solana_feature_set::FeatureSet, + solana_hash::Hash, + solana_instruction::{error::InstructionError, AccountMeta, Instruction}, + solana_message::{AccountKeys, Message, SanitizedMessage}, solana_program_runtime::{ declare_process_instruction, invoke_context::EnvironmentConfig, loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch}, sysvar_cache::SysvarCache, }, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, - ed25519_program, - hash::Hash, - instruction::{AccountMeta, Instruction, InstructionError}, - message::{AccountKeys, Message, SanitizedMessage}, - native_loader::{self, create_loadable_account_for_test}, - 
pubkey::Pubkey, - rent::Rent, - reserved_account_keys::ReservedAccountKeys, - secp256k1_instruction::new_secp256k1_instruction, - secp256k1_program, system_program, - transaction_context::TransactionContext, - }, + solana_pubkey::Pubkey, + solana_rent::Rent, + solana_reserved_account_keys::ReservedAccountKeys, + solana_sdk::native_loader::create_loadable_account_for_test, + solana_sdk_ids::{ed25519_program, native_loader, secp256k1_program, system_program}, + solana_secp256k1_program::new_secp256k1_instruction, solana_secp256r1_program::new_secp256r1_instruction, + solana_transaction_context::TransactionContext, std::sync::Arc, }; @@ -448,11 +443,11 @@ mod tests { let mock_program_id = Pubkey::from([2u8; 32]); let accounts = vec![ ( - solana_sdk::pubkey::new_rand(), + solana_pubkey::new_rand(), AccountSharedData::new(100, 1, &mock_program_id), ), ( - solana_sdk::pubkey::new_rand(), + solana_pubkey::new_rand(), AccountSharedData::new(0, 1, &mock_program_id), ), ( diff --git a/svm/src/nonce_info.rs b/svm/src/nonce_info.rs index 6405c5e9cbe1f4..0fa36bb5eb0328 100644 --- a/svm/src/nonce_info.rs +++ b/svm/src/nonce_info.rs @@ -1,10 +1,10 @@ use { - solana_sdk::{ - account::AccountSharedData, - account_utils::StateMut, - nonce::state::{DurableNonce, State as NonceState, Versions as NonceVersions}, - pubkey::Pubkey, + solana_account::{state_traits::StateMut, AccountSharedData}, + solana_nonce::{ + state::{DurableNonce, State as NonceState}, + versions::Versions as NonceVersions, }, + solana_pubkey::Pubkey, thiserror::Error, }; @@ -62,13 +62,12 @@ impl NonceInfo { mod tests { use { super::*, - solana_sdk::{ - hash::Hash, - nonce::state::{ - Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, - }, - system_program, + solana_hash::Hash, + solana_nonce::{ + state::{Data as NonceData, DurableNonce, State as NonceState}, + versions::Versions as NonceVersions, }, + solana_sdk_ids::system_program, }; fn create_nonce_account(state: NonceState) -> AccountSharedData { diff --git a/svm/src/program_loader.rs b/svm/src/program_loader.rs index 553b57fac08634..f45bf5bdde67b6 100644 --- a/svm/src/program_loader.rs +++ b/svm/src/program_loader.rs @@ -1,21 +1,18 @@ use { crate::transaction_processing_callback::TransactionProcessingCallback, + solana_account::{state_traits::StateMut, AccountSharedData, ReadableAccount}, + solana_clock::Slot, + solana_instruction::error::InstructionError, + solana_program::bpf_loader_upgradeable::{self, UpgradeableLoaderState}, solana_program_runtime::loaded_programs::{ LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, - account_utils::StateMut, - bpf_loader, bpf_loader_deprecated, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, - clock::Slot, - instruction::InstructionError, - loader_v4::{self, LoaderV4State, LoaderV4Status}, - pubkey::Pubkey, - transaction::{self, TransactionError}, - }, + solana_pubkey::Pubkey, + solana_sdk::loader_v4::{self, LoaderV4State, LoaderV4Status}, + solana_sdk_ids::{bpf_loader, bpf_loader_deprecated}, solana_timings::ExecuteTimings, + solana_transaction_error::{TransactionError, TransactionResult}, solana_type_overrides::sync::Arc, }; @@ -219,7 +216,7 @@ pub fn load_program_with_pubkey( pub(crate) fn get_program_modification_slot( callbacks: &CB, pubkey: &Pubkey, -) -> transaction::Result { +) -> TransactionResult { let program = 
callbacks .get_account_shared_data(pubkey) .ok_or(TransactionError::ProgramAccountNotFound)?; @@ -254,11 +251,12 @@ mod tests { use { super::*, crate::transaction_processor::TransactionBatchProcessor, + solana_account::WritableAccount, solana_program_runtime::{ loaded_programs::{BlockRelation, ForkGraph, ProgramRuntimeEnvironments}, - solana_rbpf::program::BuiltinProgram, + solana_sbpf::program::BuiltinProgram, }, - solana_sdk::{account::WritableAccount, bpf_loader, bpf_loader_upgradeable}, + solana_sdk_ids::{bpf_loader, bpf_loader_upgradeable}, std::{ cell::RefCell, collections::HashMap, diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index 1a9a764e131186..fec3cb27e3cc16 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -1,10 +1,8 @@ use { crate::nonce_info::NonceInfo, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - clock::Epoch, - pubkey::Pubkey, - }, + solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, + solana_clock::Epoch, + solana_pubkey::Pubkey, }; /// Captured account state used to rollback account state for nonce and fee @@ -107,14 +105,13 @@ impl RollbackAccounts { mod tests { use { super::*, - solana_sdk::{ - account::{ReadableAccount, WritableAccount}, - hash::Hash, - nonce::state::{ - Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, - }, - system_program, + solana_account::{ReadableAccount, WritableAccount}, + solana_hash::Hash, + solana_nonce::{ + state::{Data as NonceData, DurableNonce, State as NonceState}, + versions::Versions as NonceVersions, }, + solana_sdk_ids::system_program, }; #[test] diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index 3bf90adcd3cba6..b8ce3785f8cf02 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -1,12 +1,10 @@ use { - solana_sdk::{ - account::ReadableAccount, - native_loader, - transaction::Result, - transaction_context::{IndexOfAccount, TransactionContext}, - }, + solana_account::ReadableAccount, + solana_sdk_ids::native_loader, solana_svm_rent_collector::{rent_state::RentState, svm_rent_collector::SVMRentCollector}, solana_svm_transaction::svm_message::SVMMessage, + solana_transaction_context::{IndexOfAccount, TransactionContext}, + solana_transaction_error::TransactionResult as Result, }; #[derive(PartialEq, Debug)] @@ -73,18 +71,19 @@ impl TransactionAccountStateInfo { mod test { use { super::*, - solana_sdk::{ - account::AccountSharedData, - hash::Hash, - instruction::CompiledInstruction, - message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, - rent::Rent, - rent_collector::RentCollector, - reserved_account_keys::ReservedAccountKeys, - signature::{Keypair, Signer}, - transaction::TransactionError, - transaction_context::TransactionContext, + solana_account::AccountSharedData, + solana_hash::Hash, + solana_keypair::Keypair, + solana_message::{ + compiled_instruction::CompiledInstruction, LegacyMessage, Message, MessageHeader, + SanitizedMessage, }, + solana_rent::Rent, + solana_reserved_account_keys::ReservedAccountKeys, + solana_sdk::rent_collector::RentCollector, + solana_signer::Signer, + solana_transaction_context::TransactionContext, + solana_transaction_error::TransactionError, }; #[test] diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index 6d838ea0786d53..5e260e08ea863f 100644 --- a/svm/src/transaction_commit_result.rs +++ 
b/svm/src/transaction_commit_result.rs @@ -1,9 +1,8 @@ use { crate::transaction_execution_result::TransactionLoadedAccountsStats, - solana_sdk::{ - fee::FeeDetails, inner_instruction::InnerInstructionsList, rent_debits::RentDebits, - transaction::Result as TransactionResult, transaction_context::TransactionReturnData, - }, + solana_fee_structure::FeeDetails, solana_rent_debits::RentDebits, + solana_sdk::inner_instruction::InnerInstructionsList, + solana_transaction_context::TransactionReturnData, solana_transaction_error::TransactionResult, }; pub type TransactionCommitResult = TransactionResult; diff --git a/svm/src/transaction_error_metrics.rs b/svm/src/transaction_error_metrics.rs index 5b3ec2b7e53d1d..8f345390bbd63d 100644 --- a/svm/src/transaction_error_metrics.rs +++ b/svm/src/transaction_error_metrics.rs @@ -1,31 +1,31 @@ -use solana_sdk::saturating_add_assign; +use std::num::Saturating; #[derive(Debug, Default)] pub struct TransactionErrorMetrics { - pub total: usize, - pub account_in_use: usize, - pub too_many_account_locks: usize, - pub account_loaded_twice: usize, - pub account_not_found: usize, - pub blockhash_not_found: usize, - pub blockhash_too_old: usize, - pub call_chain_too_deep: usize, - pub already_processed: usize, - pub instruction_error: usize, - pub insufficient_funds: usize, - pub invalid_account_for_fee: usize, - pub invalid_account_index: usize, - pub invalid_program_for_execution: usize, - pub invalid_compute_budget: usize, - pub not_allowed_during_cluster_maintenance: usize, - pub invalid_writable_account: usize, - pub invalid_rent_paying_account: usize, - pub would_exceed_max_block_cost_limit: usize, - pub would_exceed_max_account_cost_limit: usize, - pub would_exceed_max_vote_cost_limit: usize, - pub would_exceed_account_data_block_limit: usize, - pub max_loaded_accounts_data_size_exceeded: usize, - pub program_execution_temporarily_restricted: usize, + pub total: Saturating, + pub account_in_use: Saturating, + pub too_many_account_locks: Saturating, + pub account_loaded_twice: Saturating, + pub account_not_found: Saturating, + pub blockhash_not_found: Saturating, + pub blockhash_too_old: Saturating, + pub call_chain_too_deep: Saturating, + pub already_processed: Saturating, + pub instruction_error: Saturating, + pub insufficient_funds: Saturating, + pub invalid_account_for_fee: Saturating, + pub invalid_account_index: Saturating, + pub invalid_program_for_execution: Saturating, + pub invalid_compute_budget: Saturating, + pub not_allowed_during_cluster_maintenance: Saturating, + pub invalid_writable_account: Saturating, + pub invalid_rent_paying_account: Saturating, + pub would_exceed_max_block_cost_limit: Saturating, + pub would_exceed_max_account_cost_limit: Saturating, + pub would_exceed_max_vote_cost_limit: Saturating, + pub would_exceed_account_data_block_limit: Saturating, + pub max_loaded_accounts_data_size_exceeded: Saturating, + pub program_execution_temporarily_restricted: Saturating, } impl TransactionErrorMetrics { @@ -34,59 +34,30 @@ impl TransactionErrorMetrics { } pub fn accumulate(&mut self, other: &TransactionErrorMetrics) { - saturating_add_assign!(self.total, other.total); - saturating_add_assign!(self.account_in_use, other.account_in_use); - saturating_add_assign!(self.too_many_account_locks, other.too_many_account_locks); - saturating_add_assign!(self.account_loaded_twice, other.account_loaded_twice); - saturating_add_assign!(self.account_not_found, other.account_not_found); - saturating_add_assign!(self.blockhash_not_found, 
other.blockhash_not_found); - saturating_add_assign!(self.blockhash_too_old, other.blockhash_too_old); - saturating_add_assign!(self.call_chain_too_deep, other.call_chain_too_deep); - saturating_add_assign!(self.already_processed, other.already_processed); - saturating_add_assign!(self.instruction_error, other.instruction_error); - saturating_add_assign!(self.insufficient_funds, other.insufficient_funds); - saturating_add_assign!(self.invalid_account_for_fee, other.invalid_account_for_fee); - saturating_add_assign!(self.invalid_account_index, other.invalid_account_index); - saturating_add_assign!( - self.invalid_program_for_execution, - other.invalid_program_for_execution - ); - saturating_add_assign!(self.invalid_compute_budget, other.invalid_compute_budget); - saturating_add_assign!( - self.not_allowed_during_cluster_maintenance, - other.not_allowed_during_cluster_maintenance - ); - saturating_add_assign!( - self.invalid_writable_account, - other.invalid_writable_account - ); - saturating_add_assign!( - self.invalid_rent_paying_account, - other.invalid_rent_paying_account - ); - saturating_add_assign!( - self.would_exceed_max_block_cost_limit, - other.would_exceed_max_block_cost_limit - ); - saturating_add_assign!( - self.would_exceed_max_account_cost_limit, - other.would_exceed_max_account_cost_limit - ); - saturating_add_assign!( - self.would_exceed_max_vote_cost_limit, - other.would_exceed_max_vote_cost_limit - ); - saturating_add_assign!( - self.would_exceed_account_data_block_limit, - other.would_exceed_account_data_block_limit - ); - saturating_add_assign!( - self.max_loaded_accounts_data_size_exceeded, - other.max_loaded_accounts_data_size_exceeded - ); - saturating_add_assign!( - self.program_execution_temporarily_restricted, - other.program_execution_temporarily_restricted - ); + self.total += other.total; + self.account_in_use += other.account_in_use; + self.too_many_account_locks += other.too_many_account_locks; + self.account_loaded_twice += other.account_loaded_twice; + self.account_not_found += other.account_not_found; + self.blockhash_not_found += other.blockhash_not_found; + self.blockhash_too_old += other.blockhash_too_old; + self.call_chain_too_deep += other.call_chain_too_deep; + self.already_processed += other.already_processed; + self.instruction_error += other.instruction_error; + self.insufficient_funds += other.insufficient_funds; + self.invalid_account_for_fee += other.invalid_account_for_fee; + self.invalid_account_index += other.invalid_account_index; + self.invalid_program_for_execution += other.invalid_program_for_execution; + self.invalid_compute_budget += other.invalid_compute_budget; + self.not_allowed_during_cluster_maintenance += other.not_allowed_during_cluster_maintenance; + self.invalid_writable_account += other.invalid_writable_account; + self.invalid_rent_paying_account += other.invalid_rent_paying_account; + self.would_exceed_max_block_cost_limit += other.would_exceed_max_block_cost_limit; + self.would_exceed_max_account_cost_limit += other.would_exceed_max_account_cost_limit; + self.would_exceed_max_vote_cost_limit += other.would_exceed_max_vote_cost_limit; + self.would_exceed_account_data_block_limit += other.would_exceed_account_data_block_limit; + self.max_loaded_accounts_data_size_exceeded += other.max_loaded_accounts_data_size_exceeded; + self.program_execution_temporarily_restricted += + other.program_execution_temporarily_restricted; } } diff --git a/svm/src/transaction_execution_result.rs b/svm/src/transaction_execution_result.rs index 
c226ae262a82ad..79ed41cbd61e60 100644 --- a/svm/src/transaction_execution_result.rs +++ b/svm/src/transaction_execution_result.rs @@ -7,7 +7,9 @@ pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList} use { crate::account_loader::LoadedTransaction, solana_program_runtime::loaded_programs::ProgramCacheEntry, - solana_sdk::{pubkey::Pubkey, transaction, transaction_context::TransactionReturnData}, + solana_pubkey::Pubkey, + solana_transaction_context::TransactionReturnData, + solana_transaction_error::TransactionResult, std::{collections::HashMap, sync::Arc}, }; @@ -32,7 +34,7 @@ impl ExecutedTransaction { #[derive(Clone, Debug, Eq, PartialEq)] pub struct TransactionExecutionDetails { - pub status: transaction::Result<()>, + pub status: TransactionResult<()>, pub log_messages: Option>, pub inner_instructions: Option, pub return_data: Option, diff --git a/svm/src/transaction_processing_callback.rs b/svm/src/transaction_processing_callback.rs index a1356fc06e6da2..803a7724ddfb8e 100644 --- a/svm/src/transaction_processing_callback.rs +++ b/svm/src/transaction_processing_callback.rs @@ -1,4 +1,4 @@ -use solana_sdk::{account::AccountSharedData, pubkey::Pubkey}; +use {solana_account::AccountSharedData, solana_pubkey::Pubkey}; /// Runtime callbacks for transaction processing. pub trait TransactionProcessingCallback { diff --git a/svm/src/transaction_processing_result.rs b/svm/src/transaction_processing_result.rs index 0658b5035fda0f..c8da2a941c1a9b 100644 --- a/svm/src/transaction_processing_result.rs +++ b/svm/src/transaction_processing_result.rs @@ -3,10 +3,8 @@ use { account_loader::FeesOnlyTransaction, transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails}, }, - solana_sdk::{ - fee::FeeDetails, - transaction::{Result as TransactionResult, TransactionError}, - }, + solana_fee_structure::FeeDetails, + solana_transaction_error::{TransactionError, TransactionResult}, }; pub type TransactionProcessingResult = TransactionResult; diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 21245436503456..01c4af21b55a42 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -20,48 +20,50 @@ use { }, log::debug, percentage::Percentage, + solana_account::{state_traits::StateMut, AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, solana_bpf_loader_program::syscalls::{ create_program_runtime_environment_v1, create_program_runtime_environment_v2, }, + solana_clock::{Epoch, Slot}, solana_compute_budget::compute_budget::ComputeBudget, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, solana_feature_set::{ enable_transaction_loading_failure_fees, remove_accounts_executable_flag_checks, remove_rounding_in_fee_calculation, FeatureSet, }, + solana_fee_structure::{FeeBudgetLimits, FeeStructure}, + solana_hash::Hash, + solana_instruction::TRANSACTION_LEVEL_STACK_HEIGHT, solana_log_collector::LogCollector, solana_measure::{measure::Measure, measure_us}, + solana_message::compiled_instruction::CompiledInstruction, + solana_nonce::{ + state::{DurableNonce, State as NonceState}, + versions::Versions as NonceVersions, + }, solana_program_runtime::{ invoke_context::{EnvironmentConfig, InvokeContext}, loaded_programs::{ ForkGraph, ProgramCache, ProgramCacheEntry, ProgramCacheForTxBatch, ProgramCacheMatchCriteria, ProgramRuntimeEnvironment, }, - solana_rbpf::{ + solana_sbpf::{ program::{BuiltinProgram, FunctionRegistry}, vm::Config as VmConfig, }, 
sysvar_cache::SysvarCache, }, + solana_pubkey::Pubkey, solana_sdk::{ - account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, - account_utils::StateMut, - clock::{Epoch, Slot}, - fee::{FeeBudgetLimits, FeeStructure}, - hash::Hash, inner_instruction::{InnerInstruction, InnerInstructionsList}, - instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, - native_loader, - nonce::state::{DurableNonce, State as NonceState, Versions as NonceVersions}, - pubkey::Pubkey, rent_collector::RentCollector, - saturating_add_assign, system_program, - transaction::{self, TransactionError}, - transaction_context::{ExecutionRecord, TransactionContext}, }, + solana_sdk_ids::{native_loader, system_program}, solana_svm_rent_collector::svm_rent_collector::SVMRentCollector, solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction}, solana_timings::{ExecuteTimingType, ExecuteTimings}, + solana_transaction_context::{ExecutionRecord, TransactionContext}, + solana_transaction_error::{TransactionError, TransactionResult}, solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -525,7 +527,7 @@ impl TransactionBatchProcessor { fee_lamports_per_signature: u64, rent_collector: &dyn SVMRentCollector, error_counters: &mut TransactionErrorMetrics, - ) -> transaction::Result { + ) -> TransactionResult { // If this is a nonce transaction, validate the nonce info. // This must be done for every transaction to support SIMD83 because // it may have changed due to use, authorization, or deallocation. @@ -566,7 +568,7 @@ impl TransactionBatchProcessor { fee_lamports_per_signature: u64, rent_collector: &dyn SVMRentCollector, error_counters: &mut TransactionErrorMetrics, - ) -> transaction::Result { + ) -> TransactionResult { let compute_budget_limits = process_compute_budget_instructions( message.program_instructions_iter(), &account_loader.feature_set, @@ -643,7 +645,7 @@ impl TransactionBatchProcessor { nonce_info: &NonceInfo, next_durable_nonce: &DurableNonce, error_counters: &mut TransactionErrorMetrics, - ) -> transaction::Result<()> { + ) -> TransactionResult<()> { // When SIMD83 is enabled, if the nonce has been used in this batch already, we must drop // the transaction. This is the same as if it was used in different batches in the same slot. // If the nonce account was closed in the batch, we error as if the blockhash didn't validate. 
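Reviewer note on the `std::num::Saturating` migration in the `transaction_error_metrics.rs` and `account_loader.rs` hunks above: the counters drop the `saturating_add_assign!` macro in favor of `Saturating` wrappers (the generic parameters are stripped in this copy of the patch; from the surrounding code they would read `Saturating<usize>` for the metrics and `Saturating<u32>` for the accumulated loaded-accounts size). A minimal sketch of the pattern, with illustrative names rather than the crate's own types:

```rust
use std::num::Saturating;

// Stand-in for TransactionErrorMetrics; per the hunks above, fields are
// Saturating<usize> rather than bare usize.
#[derive(Debug, Default)]
struct Metrics {
    account_not_found: Saturating<usize>,
}

impl Metrics {
    fn accumulate(&mut self, other: &Metrics) {
        // `Saturating` implements `AddAssign`, so a plain `+=` clamps at
        // usize::MAX instead of wrapping; this is what replaces the old
        // saturating_add_assign! call sites.
        self.account_not_found += other.account_not_found;
    }
}

fn main() {
    let mut total = Metrics::default();
    let batch = Metrics {
        account_not_found: Saturating(1),
    };
    total.accumulate(&batch);
    // The inner value is read through `.0`, which is why the asserts in
    // the test hunks changed from `metrics.x` to `metrics.x.0`.
    assert_eq!(total.account_not_found.0, 1);

    // Saturation at the top of the range, the property the type buys:
    let mut x = Saturating(usize::MAX);
    x += Saturating(1);
    assert_eq!(x.0, usize::MAX);
}
```

The wrapper makes saturation a property of the type, so every future `+=` on a counter is safe by construction instead of relying on each call site remembering the macro.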
@@ -711,7 +713,7 @@ impl TransactionBatchProcessor { .for_each(|key| match result.entry(*key) { Entry::Occupied(mut entry) => { let (_, count) = entry.get_mut(); - saturating_add_assign!(*count, 1); + *count = count.saturating_add(1); } Entry::Vacant(entry) => { if let Some(index) = @@ -1197,6 +1199,8 @@ impl TransactionBatchProcessor { #[cfg(test)] mod tests { + #[allow(deprecated)] + use solana_sysvar::fees::Fees; use { super::*, crate::{ @@ -1205,28 +1209,28 @@ mod tests { rollback_accounts::RollbackAccounts, transaction_processing_callback::AccountState, }, + solana_account::{create_account_shared_data_for_test, WritableAccount}, + solana_clock::Clock, solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, + solana_compute_budget_interface::ComputeBudgetInstruction, + solana_epoch_schedule::EpochSchedule, solana_feature_set::FeatureSet, + solana_fee_calculator::FeeCalculator, + solana_fee_structure::{FeeDetails, FeeStructure}, + solana_hash::Hash, + solana_keypair::Keypair, + solana_message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, + solana_nonce as nonce, solana_program_runtime::loaded_programs::{BlockRelation, ProgramCacheEntryType}, - solana_sdk::{ - account::{create_account_shared_data_for_test, WritableAccount}, - bpf_loader, - compute_budget::ComputeBudgetInstruction, - epoch_schedule::EpochSchedule, - fee::{FeeDetails, FeeStructure}, - fee_calculator::FeeCalculator, - hash::Hash, - message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, - nonce, - rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, - reserved_account_keys::ReservedAccountKeys, - signature::{Keypair, Signature}, - system_program, - sysvar::{self, rent::Rent}, - transaction::{SanitizedTransaction, Transaction, TransactionError}, - transaction_context::TransactionContext, - }, + solana_rent::Rent, + solana_rent_debits::RentDebits, + solana_reserved_account_keys::ReservedAccountKeys, + solana_sdk::rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, + solana_sdk_ids::{bpf_loader, system_program, sysvar}, + solana_signature::Signature, + solana_transaction::{sanitized::SanitizedTransaction, Transaction}, + solana_transaction_context::TransactionContext, + solana_transaction_error::TransactionError, test_case::test_case, }; @@ -1560,7 +1564,7 @@ mod tests { &processing_config, ); - assert_eq!(error_metrics.instruction_error, 1); + assert_eq!(error_metrics.instruction_error.0, 1); } #[test] @@ -1910,7 +1914,7 @@ mod tests { fn test_sysvar_cache_initialization1() { let mock_bank = MockBankCallback::default(); - let clock = sysvar::clock::Clock { + let clock = Clock { slot: 1, epoch_start_timestamp: 2, epoch: 3, @@ -1932,7 +1936,7 @@ mod tests { .unwrap() .insert(sysvar::epoch_schedule::id(), epoch_schedule_account); - let fees = sysvar::fees::Fees { + let fees = Fees { fee_calculator: FeeCalculator { lamports_per_signature: 123, }, @@ -1986,7 +1990,7 @@ mod tests { fn test_reset_and_fill_sysvar_cache() { let mock_bank = MockBankCallback::default(); - let clock = sysvar::clock::Clock { + let clock = Clock { slot: 1, epoch_start_timestamp: 2, epoch: 3, @@ -2008,7 +2012,7 @@ mod tests { .unwrap() .insert(sysvar::epoch_schedule::id(), epoch_schedule_account); - let fees = sysvar::fees::Fees { + let fees = Fees { fee_calculator: FeeCalculator { lamports_per_signature: 123, }, @@ -2139,7 +2143,9 @@ mod tests { epoch: current_epoch, ..RentCollector::default() }; - let min_balance = rent_collector.rent.minimum_balance(nonce::State::size()); + let 
min_balance = rent_collector + .rent + .minimum_balance(nonce::state::State::size()); let transaction_fee = lamports_per_signature; let priority_fee = 2_000_000u64; let starting_balance = transaction_fee + priority_fee; @@ -2311,7 +2317,7 @@ mod tests { &mut error_counters, ); - assert_eq!(error_counters.account_not_found, 1); + assert_eq!(error_counters.account_not_found.0, 1); assert_eq!(result, Err(TransactionError::AccountNotFound)); } @@ -2345,7 +2351,7 @@ mod tests { &mut error_counters, ); - assert_eq!(error_counters.insufficient_funds, 1); + assert_eq!(error_counters.insufficient_funds.0, 1); assert_eq!(result, Err(TransactionError::InsufficientFundsForFee)); } @@ -2419,7 +2425,7 @@ mod tests { &mut error_counters, ); - assert_eq!(error_counters.invalid_account_for_fee, 1); + assert_eq!(error_counters.invalid_account_for_fee.0, 1); assert_eq!(result, Err(TransactionError::InvalidAccountForFee)); } @@ -2451,7 +2457,7 @@ mod tests { &mut error_counters, ); - assert_eq!(error_counters.invalid_compute_budget, 1); + assert_eq!(error_counters.invalid_compute_budget.0, 1); assert_eq!(result, Err(TransactionError::DuplicateInstruction(1u8))); } @@ -2475,7 +2481,7 @@ mod tests { ) .unwrap(); let fee_payer_address = message.fee_payer(); - let min_balance = Rent::default().minimum_balance(nonce::State::size()); + let min_balance = Rent::default().minimum_balance(nonce::state::State::size()); let transaction_fee = lamports_per_signature; let priority_fee = compute_unit_limit; @@ -2483,11 +2489,13 @@ mod tests { { let fee_payer_account = AccountSharedData::new_data( min_balance + transaction_fee + priority_fee, - &nonce::state::Versions::new(nonce::State::Initialized(nonce::state::Data::new( - *fee_payer_address, - DurableNonce::default(), - lamports_per_signature, - ))), + &nonce::versions::Versions::new(nonce::state::State::Initialized( + nonce::state::Data::new( + *fee_payer_address, + DurableNonce::default(), + lamports_per_signature, + ), + )), &system_program::id(), ) .unwrap(); @@ -2556,7 +2564,7 @@ mod tests { { let fee_payer_account = AccountSharedData::new_data( transaction_fee + priority_fee, // no min_balance this time - &nonce::state::Versions::new(nonce::State::Initialized( + &nonce::versions::Versions::new(nonce::state::State::Initialized( nonce::state::Data::default(), )), &system_program::id(), @@ -2585,7 +2593,7 @@ mod tests { &mut error_counters, ); - assert_eq!(error_counters.insufficient_funds, 1); + assert_eq!(error_counters.insufficient_funds.0, 1); assert_eq!(result, Err(TransactionError::InsufficientFundsForFee)); } } diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 6cd692133d219b..6df8eedb3c28f8 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -11,8 +11,8 @@ use { solana_program_runtime::{ invoke_context::InvokeContext, loaded_programs::{BlockRelation, ForkGraph, ProgramCacheEntry}, - solana_rbpf::{ - program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + solana_sbpf::{ + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry, SBPFVersion}, vm::Config, }, }, @@ -302,10 +302,7 @@ pub fn create_custom_loader<'a>() -> BuiltinProgram> { reject_broken_elfs: true, noop_instruction_rate: 256, sanitize_user_provided_values: true, - external_internal_function_hash_collision: false, - reject_callx_r10: true, - enable_sbpf_v1: true, - enable_sbpf_v2: false, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, optimize_rodata: false, aligned_memory_mapping: true, }; diff --git a/tls-utils/src/config.rs 
b/tls-utils/src/config.rs new file mode 100644 index 00000000000000..c0f038821547fd --- /dev/null +++ b/tls-utils/src/config.rs @@ -0,0 +1,21 @@ +use { + rustls::{ + client::WantsClientCert, server::WantsServerCert, ClientConfig, ConfigBuilder, ServerConfig, + }, + std::sync::Arc, +}; + +pub fn tls_client_config_builder() -> ConfigBuilder { + ClientConfig::builder_with_provider(Arc::new(crate::crypto_provider())) + .with_safe_default_protocol_versions() + .unwrap() + .dangerous() + .with_custom_certificate_verifier(crate::SkipServerVerification::new()) +} + +pub fn tls_server_config_builder() -> ConfigBuilder { + ServerConfig::builder_with_provider(Arc::new(crate::crypto_provider())) + .with_safe_default_protocol_versions() + .unwrap() + .with_client_cert_verifier(crate::SkipClientVerification::new()) +} diff --git a/tls-utils/src/crypto_provider.rs b/tls-utils/src/crypto_provider.rs new file mode 100644 index 00000000000000..1e1d754fda4de8 --- /dev/null +++ b/tls-utils/src/crypto_provider.rs @@ -0,0 +1,10 @@ +use rustls::{crypto::CryptoProvider, NamedGroup}; + +pub fn crypto_provider() -> CryptoProvider { + let mut provider = rustls::crypto::ring::default_provider(); + // Disable all key exchange algorithms except X25519 + provider + .kx_groups + .retain(|kx| kx.name() == NamedGroup::X25519); + provider +} diff --git a/tls-utils/src/lib.rs b/tls-utils/src/lib.rs index 2985d127fd379a..3f20152cc894a0 100644 --- a/tls-utils/src/lib.rs +++ b/tls-utils/src/lib.rs @@ -1,6 +1,12 @@ //! Collection of TLS related code fragments that end up popping up everywhere where quic is used. //! Aggregated here to avoid bugs due to conflicting implementations of the same functionality. +mod config; +pub use config::*; + +mod crypto_provider; +pub use crypto_provider::*; + mod tls_certificates; pub use tls_certificates::*; diff --git a/tls-utils/src/skip_client_verification.rs b/tls-utils/src/skip_client_verification.rs index 92be37ab8be657..17269ab0eff6f2 100644 --- a/tls-utils/src/skip_client_verification.rs +++ b/tls-utils/src/skip_client_verification.rs @@ -1,4 +1,5 @@ use { + crate::crypto_provider, rustls::{ pki_types::{CertificateDer, UnixTime}, server::danger::ClientCertVerified, @@ -14,7 +15,7 @@ pub struct SkipClientVerification(Arc); impl SkipClientVerification { pub fn new() -> Arc { - Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) + Arc::new(Self(Arc::new(crypto_provider()))) } } impl rustls::server::danger::ClientCertVerifier for SkipClientVerification { diff --git a/tls-utils/src/skip_server_verification.rs b/tls-utils/src/skip_server_verification.rs index cac5dd59410825..4fdef2c389679a 100644 --- a/tls-utils/src/skip_server_verification.rs +++ b/tls-utils/src/skip_server_verification.rs @@ -1,7 +1,8 @@ use { + crate::crypto_provider, rustls::{ client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}, - crypto::{ring, verify_tls12_signature, verify_tls13_signature, CryptoProvider}, + crypto::{verify_tls12_signature, verify_tls13_signature, CryptoProvider}, pki_types::{CertificateDer, ServerName, UnixTime}, DigitallySignedStruct, Error, SignatureScheme, }, @@ -19,7 +20,7 @@ pub struct SkipServerVerification(Arc); impl SkipServerVerification { pub fn new() -> Arc { - Arc::new(Self(Arc::new(ring::default_provider()))) + Arc::new(Self(Arc::new(crypto_provider()))) } } diff --git a/tps-client/Cargo.toml b/tps-client/Cargo.toml index c9bcf76325b5f7..1902b9f762dc73 100644 --- a/tps-client/Cargo.toml +++ b/tps-client/Cargo.toml @@ -10,15 +10,27 @@ edition = 
{ workspace = true } [dependencies] log = { workspace = true } +solana-account = { workspace = true } solana-client = { workspace = true } +solana-client-traits = { workspace = true } +solana-clock = { workspace = true } +solana-commitment-config = { workspace = true } solana-connection-cache = { workspace = true } +solana-epoch-info = { workspace = true } +solana-hash = { workspace = true } +solana-keypair = { workspace = true } +solana-message = { workspace = true } +solana-pubkey = { workspace = true } solana-quic-client = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } -solana-sdk = { workspace = true } +solana-signature = { workspace = true } +solana-signer = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } +solana-transaction = { workspace = true } +solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } thiserror = { workspace = true } diff --git a/tps-client/src/bank_client.rs b/tps-client/src/bank_client.rs index e6ae91e2db9202..95879cacb3750a 100644 --- a/tps-client/src/bank_client.rs +++ b/tps-client/src/bank_client.rs @@ -1,19 +1,17 @@ use { crate::{TpsClient, TpsClientError, TpsClientResult}, + solana_account::Account, + solana_client_traits::{AsyncClient, SyncClient}, + solana_commitment_config::CommitmentConfig, + solana_epoch_info::EpochInfo, + solana_hash::Hash, + solana_message::Message, + solana_pubkey::Pubkey, solana_rpc_client_api::config::RpcBlockConfig, solana_runtime::bank_client::BankClient, - solana_sdk::{ - account::Account, - client::{AsyncClient, SyncClient}, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, - transaction::{Result, Transaction}, - }, + solana_signature::Signature, + solana_transaction::Transaction, + solana_transaction_error::TransactionResult as Result, solana_transaction_status::UiConfirmedBlock, }; @@ -122,22 +120,22 @@ impl TpsClient for BankClient { fn get_slot_with_commitment( &self, commitment_config: CommitmentConfig, - ) -> TpsClientResult { + ) -> TpsClientResult { SyncClient::get_slot_with_commitment(self, commitment_config).map_err(|err| err.into()) } fn get_blocks_with_commitment( &self, - _start_slot: Slot, - _end_slot: Option, + _start_slot: u64, + _end_slot: Option, _commitment_config: CommitmentConfig, - ) -> TpsClientResult> { + ) -> TpsClientResult> { unimplemented!("BankClient doesn't support get_blocks"); } fn get_block_with_config( &self, - _slot: Slot, + _slot: u64, _rpc_block_config: RpcBlockConfig, ) -> TpsClientResult { unimplemented!("BankClient doesn't support get_block_with_config"); diff --git a/tps-client/src/lib.rs b/tps-client/src/lib.rs index 7e5e34bac546ef..180bfc49ef3f63 100644 --- a/tps-client/src/lib.rs +++ b/tps-client/src/lib.rs @@ -1,20 +1,17 @@ use { log::debug, + solana_account::Account, + solana_clock::DEFAULT_MS_PER_SLOT, + solana_commitment_config::CommitmentConfig, + solana_epoch_info::EpochInfo, + solana_hash::Hash, + solana_message::Message, + solana_pubkey::Pubkey, solana_rpc_client_api::{client_error::Error as ClientError, config::RpcBlockConfig}, - solana_sdk::{ - account::Account, - clock::DEFAULT_MS_PER_SLOT, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, - transaction::{Result, 
Transaction}, - transport::TransportError, - }, + solana_signature::Signature, solana_tpu_client::tpu_client::TpuSenderError, + solana_transaction::Transaction, + solana_transaction_error::{TransactionResult as Result, TransportError}, solana_transaction_status::UiConfirmedBlock, std::{ thread::sleep, @@ -126,21 +123,19 @@ pub trait TpsClient { fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> TpsClientResult>>; - fn get_slot_with_commitment( - &self, - commitment_config: CommitmentConfig, - ) -> TpsClientResult; + fn get_slot_with_commitment(&self, commitment_config: CommitmentConfig) + -> TpsClientResult; fn get_blocks_with_commitment( &self, - start_slot: Slot, - end_slot: Option, + start_slot: u64, + end_slot: Option, commitment_config: CommitmentConfig, - ) -> TpsClientResult>; + ) -> TpsClientResult>; fn get_block_with_config( &self, - slot: Slot, + slot: u64, rpc_block_config: RpcBlockConfig, ) -> TpsClientResult; } diff --git a/tps-client/src/rpc_client.rs b/tps-client/src/rpc_client.rs index 0487b0837237bb..c5f3992a190c7c 100644 --- a/tps-client/src/rpc_client.rs +++ b/tps-client/src/rpc_client.rs @@ -1,19 +1,17 @@ use { crate::{TpsClient, TpsClientError, TpsClientResult}, + solana_account::Account, solana_client::rpc_config::RpcSendTransactionConfig, + solana_commitment_config::CommitmentConfig, + solana_epoch_info::EpochInfo, + solana_hash::Hash, + solana_message::Message, + solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::config::RpcBlockConfig, - solana_sdk::{ - account::Account, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, - transaction::{Result, Transaction}, - }, + solana_signature::Signature, + solana_transaction::Transaction, + solana_transaction_error::TransactionResult as Result, solana_transaction_status::UiConfirmedBlock, }; @@ -134,23 +132,23 @@ impl TpsClient for RpcClient { fn get_slot_with_commitment( &self, commitment_config: CommitmentConfig, - ) -> TpsClientResult { + ) -> TpsClientResult { RpcClient::get_slot_with_commitment(self, commitment_config).map_err(|err| err.into()) } fn get_blocks_with_commitment( &self, - start_slot: Slot, - end_slot: Option, + start_slot: u64, + end_slot: Option, commitment_config: CommitmentConfig, - ) -> TpsClientResult> { + ) -> TpsClientResult> { RpcClient::get_blocks_with_commitment(self, start_slot, end_slot, commitment_config) .map_err(|err| err.into()) } fn get_block_with_config( &self, - slot: Slot, + slot: u64, rpc_block_config: RpcBlockConfig, ) -> TpsClientResult { RpcClient::get_block_with_config(self, slot, rpc_block_config).map_err(|err| err.into()) diff --git a/tps-client/src/tpu_client.rs b/tps-client/src/tpu_client.rs index dd34c0cd152672..296a17616965ec 100644 --- a/tps-client/src/tpu_client.rs +++ b/tps-client/src/tpu_client.rs @@ -1,21 +1,19 @@ use { crate::{TpsClient, TpsClientError, TpsClientResult}, + solana_account::Account, + solana_commitment_config::CommitmentConfig, solana_connection_cache::connection_cache::{ ConnectionManager, ConnectionPool, NewConnectionConfig, }, + solana_epoch_info::EpochInfo, + solana_hash::Hash, + solana_message::Message, + solana_pubkey::Pubkey, solana_rpc_client_api::config::RpcBlockConfig, - solana_sdk::{ - account::Account, - commitment_config::CommitmentConfig, - epoch_info::EpochInfo, - hash::Hash, - message::Message, - pubkey::Pubkey, - signature::Signature, - slot_history::Slot, - transaction::{Result, 
Transaction}, - }, + solana_signature::Signature, solana_tpu_client::tpu_client::TpuClient, + solana_transaction::Transaction, + solana_transaction_error::TransactionResult as Result, solana_transaction_status::UiConfirmedBlock, }; @@ -155,7 +153,7 @@ where fn get_slot_with_commitment( &self, commitment_config: CommitmentConfig, - ) -> TpsClientResult { + ) -> TpsClientResult { self.rpc_client() .get_slot_with_commitment(commitment_config) .map_err(|err| err.into()) @@ -163,10 +161,10 @@ where fn get_blocks_with_commitment( &self, - start_slot: Slot, - end_slot: Option, + start_slot: u64, + end_slot: Option, commitment_config: CommitmentConfig, - ) -> TpsClientResult> { + ) -> TpsClientResult> { self.rpc_client() .get_blocks_with_commitment(start_slot, end_slot, commitment_config) .map_err(|err| err.into()) @@ -174,7 +172,7 @@ where fn get_block_with_config( &self, - slot: Slot, + slot: u64, rpc_block_config: RpcBlockConfig, ) -> TpsClientResult { self.rpc_client() diff --git a/tps-client/src/utils.rs b/tps-client/src/utils.rs index 02cd622ab1fb8b..bf1d1326d0d571 100644 --- a/tps-client/src/utils.rs +++ b/tps-client/src/utils.rs @@ -1,8 +1,10 @@ use { log::{error, info}, solana_client::connection_cache::ConnectionCache as ClientConnectionCache, + solana_keypair::Keypair, + solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, - solana_sdk::{pubkey::Pubkey, signature::Signer, signer::keypair::Keypair}, + solana_signer::Signer, solana_streamer::streamer::StakedNodes, std::{ collections::HashMap, diff --git a/tpu-client-next/src/quic_networking.rs b/tpu-client-next/src/quic_networking.rs index 5183acaf391948..7aa65d969d8b11 100644 --- a/tpu-client-next/src/quic_networking.rs +++ b/tpu-client-next/src/quic_networking.rs @@ -7,7 +7,7 @@ use { }, solana_quic_definitions::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT}, solana_streamer::nonblocking::quic::ALPN_TPU_PROTOCOL_ID, - solana_tls_utils::SkipServerVerification, + solana_tls_utils::tls_client_config_builder, std::{net::SocketAddr, sync::Arc}, }; @@ -20,9 +20,7 @@ pub use { pub(crate) fn create_client_config(client_certificate: QuicClientCertificate) -> ClientConfig { // adapted from QuicLazyInitializedEndpoint::create_endpoint - let mut crypto = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(SkipServerVerification::new()) + let mut crypto = tls_client_config_builder() .with_client_auth_cert( vec![client_certificate.certificate.clone()], client_certificate.key.clone_key(), diff --git a/turbine/benches/cluster_nodes.rs b/turbine/benches/cluster_nodes.rs index 1daa304ee74ccc..08862898196807 100644 --- a/turbine/benches/cluster_nodes.rs +++ b/turbine/benches/cluster_nodes.rs @@ -7,6 +7,7 @@ use { solana_gossip::contact_info::ContactInfo, solana_ledger::shred::{Shred, ShredFlags}, solana_sdk::{clock::Slot, genesis_config::ClusterType, pubkey::Pubkey}, + solana_streamer::socket::SocketAddrSpace, solana_turbine::{ cluster_nodes::{make_test_cluster, new_cluster_nodes, ClusterNodes}, retransmit_stage::RetransmitStage, @@ -45,8 +46,12 @@ fn get_retransmit_peers_deterministic( 0, 0, ); - let _retransmit_peers = - cluster_nodes.get_retransmit_peers(slot_leader, &shred.id(), /*fanout:*/ 200); + let _retransmit_peers = cluster_nodes.get_retransmit_addrs( + slot_leader, + &shred.id(), + 200, // fanout + &SocketAddrSpace::Unspecified, + ); } } diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index b8d5252c91f781..54812620e23ec0 100644 --- 
a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -503,11 +503,12 @@ impl BroadcastRun for StandardBroadcastRun { } } -fn should_chain_merkle_shreds(_slot: Slot, cluster_type: ClusterType) -> bool { +fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool { match cluster_type { ClusterType::Development => true, ClusterType::Devnet => true, - ClusterType::MainnetBeta => false, + // Roll out chained Merkle shreds to ~5% of mainnet slots. + ClusterType::MainnetBeta => slot % 19 == 1, ClusterType::Testnet => true, } } diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 6f8aa6a1522aa5..fd5b3e5e6dc454 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -28,7 +28,7 @@ use { std::{ any::TypeId, cmp::Reverse, - collections::HashMap, + collections::{HashMap, HashSet}, iter::repeat_with, marker::PhantomData, net::{IpAddr, SocketAddr}, @@ -83,20 +83,12 @@ pub struct ClusterNodesCache { ttl: Duration, // Time to live. } -pub struct RetransmitPeers<'a> { - root_distance: usize, // distance from the root node - children: Vec<&'a Node>, - // Maps tvu addresses to the first node - // in the shuffle with the same address. - addrs: HashMap, // tvu addresses -} - impl Node { #[inline] - fn pubkey(&self) -> Pubkey { + fn pubkey(&self) -> &Pubkey { match &self.node { - NodeId::Pubkey(pubkey) => *pubkey, - NodeId::ContactInfo(node) => *node.pubkey(), + NodeId::Pubkey(pubkey) => pubkey, + NodeId::ContactInfo(node) => node.pubkey(), } } @@ -168,33 +160,13 @@ impl ClusterNodes { } impl ClusterNodes { - pub(crate) fn get_retransmit_addrs( + pub fn get_retransmit_addrs( &self, slot_leader: &Pubkey, shred: &ShredId, fanout: usize, + socket_addr_space: &SocketAddrSpace, ) -> Result<(/*root_distance:*/ usize, Vec), Error> { - let RetransmitPeers { - root_distance, - children, - addrs, - } = self.get_retransmit_peers(slot_leader, shred, fanout)?; - let protocol = get_broadcast_protocol(shred); - let peers = children.into_iter().filter_map(|node| { - node.contact_info()? - .tvu(protocol) - .ok() - .filter(|addr| addrs.get(addr) == Some(&node.pubkey())) - }); - Ok((root_distance, peers.collect())) - } - - pub fn get_retransmit_peers( - &self, - slot_leader: &Pubkey, - shred: &ShredId, - fanout: usize, - ) -> Result { let mut weighted_shuffle = self.weighted_shuffle.clone(); // Exclude slot leader from list of nodes. if slot_leader == &self.pubkey { @@ -206,39 +178,30 @@ impl ClusterNodes { if let Some(index) = self.index.get(slot_leader) { weighted_shuffle.remove_index(*index); } - let mut addrs = HashMap::::with_capacity(self.nodes.len()); let mut rng = get_seeded_rng(slot_leader, shred); - let protocol = get_broadcast_protocol(shred); - let nodes: Vec<_> = weighted_shuffle - .shuffle(&mut rng) - .map(|index| &self.nodes[index]) - .inspect(|node| { - if let Some(node) = node.contact_info() { - if let Ok(addr) = node.tvu(protocol) { - addrs.entry(addr).or_insert(*node.pubkey()); - } - } + let nodes = { + let protocol = get_broadcast_protocol(shred); + // If there are 2 nodes in the shuffle with the same socket-addr, + // we only send shreds to the first one. The hash-set below allows + // to track if a socket-addr was observed earlier in the shuffle. 
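Standalone illustration of the hash-set idiom the comment above introduces, before the diff continues: `HashSet::insert` returns `false` for a key seen earlier, so filtering on it keeps a TVU address only for the first node in shuffle order and maps later duplicates to `None`. The stripped generic here is presumably `HashSet<SocketAddr>`; the labels and ports below are made up, and the real code maps weighted-shuffle indices through `ContactInfo` rather than a plain `Vec`:

```rust
use std::collections::HashSet;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

fn main() {
    let addr = |port: u16| SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port));
    // Two shuffled "nodes" share a socket address; only the first keeps it.
    let shuffled = vec![("a", addr(8001)), ("b", addr(8001)), ("c", addr(8002))];

    let mut seen = HashSet::<SocketAddr>::with_capacity(shuffled.len());
    let peers: Vec<(_, Option<SocketAddr>)> = shuffled
        .into_iter()
        // `insert` returns false if the address was observed earlier in
        // the shuffle, so duplicates collapse to (node, None).
        .map(|(node, a)| (node, Some(a).filter(|&a| seen.insert(a))))
        .collect();

    assert_eq!(peers[0].1, Some(addr(8001)));
    assert_eq!(peers[1].1, None); // duplicate address dropped
    assert_eq!(peers[2].1, Some(addr(8002)));
}
```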
+ let mut addrs = HashSet::::with_capacity(self.nodes.len()); + weighted_shuffle.shuffle(&mut rng).map(move |index| { + let node = &self.nodes[index]; + let addr: Option = node + .contact_info() + .and_then(|node| node.tvu(protocol).ok()) + .filter(|&addr| addrs.insert(addr)); + (node, addr) }) - .collect(); - let self_index = nodes - .iter() - .position(|node| node.pubkey() == self.pubkey) - .unwrap(); - let root_distance = if self_index == 0 { - 0 - } else if self_index <= fanout { - 1 - } else if self_index <= fanout.saturating_add(1).saturating_mul(fanout) { - 2 - } else { - 3 // If changed, update MAX_NUM_TURBINE_HOPS. }; - let peers = get_retransmit_peers(fanout, self_index, &nodes); - Ok(RetransmitPeers { - root_distance, - children: peers.collect(), - addrs, - }) + let (index, peers) = + get_retransmit_peers(fanout, |(node, _)| node.pubkey() == &self.pubkey, nodes); + let peers = peers + .filter_map(|(_, addr)| addr) + .filter(|addr| socket_addr_space.check(addr)) + .collect(); + let root_distance = get_root_distance(index, fanout); + Ok((root_distance, peers)) } // Returns the parent node in the turbine broadcast tree. @@ -271,10 +234,10 @@ impl ClusterNodes { let nodes: Vec<_> = weighted_shuffle .shuffle(&mut rng) .map(|index| &self.nodes[index]) - .take_while(|node| node.pubkey() != self.pubkey) + .take_while(|node| node.pubkey() != &self.pubkey) .collect(); let parent = get_retransmit_parent(fanout, nodes.len(), &nodes); - Ok(parent.map(Node::pubkey)) + Ok(parent.map(Node::pubkey).copied()) } } @@ -288,7 +251,7 @@ pub fn new_cluster_nodes( let index: HashMap<_, _> = nodes .iter() .enumerate() - .map(|(ix, node)| (node.pubkey(), ix)) + .map(|(ix, node)| (*node.pubkey(), ix)) .collect(); let broadcast = TypeId::of::() == TypeId::of::(); let stakes: Vec = nodes.iter().map(|node| node.stake).collect(); @@ -344,7 +307,7 @@ fn get_nodes( stake, }), ) - .sorted_by_key(|node| Reverse((node.stake, node.pubkey()))) + .sorted_by_key(|node| Reverse((node.stake, *node.pubkey()))) // Since sorted_by_key is stable, in case of duplicates, this // will keep nodes with contact-info. .dedup_by(|a, b| a.pubkey() == b.pubkey()) @@ -367,7 +330,7 @@ fn get_nodes( // pubkey for deterministic shuffle, but strip the contact-info so // that no more packets are sent to this node. (node.stake > 0u64).then(|| Node { - node: NodeId::from(node.pubkey()), + node: NodeId::from(*node.pubkey()), stake: node.stake, }) } @@ -393,22 +356,29 @@ fn get_seeded_rng(leader: &Pubkey, shred: &ShredId) -> ChaChaRng { // Each other node retransmits shreds to fanout many nodes in the next layer. // For example the node k in the 1st layer will retransmit to nodes: // fanout + k, 2*fanout + k, ..., fanout*fanout + k -fn get_retransmit_peers( +fn get_retransmit_peers( fanout: usize, - index: usize, // Local node's index within the nodes slice. - nodes: &[T], -) -> impl Iterator + '_ { + // Predicate fn which identifies this node in the shuffle. + pred: impl Fn(T) -> bool, + nodes: impl IntoIterator, +) -> (/*this node's index:*/ usize, impl Iterator) { + let mut nodes = nodes.into_iter(); + // This node's index within shuffled nodes. + let index = nodes.by_ref().position(pred).unwrap(); // Node's index within its neighborhood. let offset = index.saturating_sub(1) % fanout; // First node in the neighborhood. let anchor = index - offset; let step = if index == 0 { 1 } else { fanout }; - (anchor * fanout + offset + 1..) + let peers = (anchor * fanout + offset + 1..) 
.step_by(step) .take(fanout) - .map(|i| nodes.get(i)) - .while_some() - .copied() + .scan(index, move |state, k| -> Option { + let peer = nodes.by_ref().nth(k - *state - 1)?; + *state = k; + Some(peer) + }); + (index, peers) } // Returns the parent node in the turbine broadcast tree. @@ -519,6 +489,19 @@ pub(crate) fn get_broadcast_protocol(_: &ShredId) -> Protocol { Protocol::UDP } +#[inline] +fn get_root_distance(index: usize, fanout: usize) -> usize { + if index == 0 { + 0 + } else if index <= fanout { + 1 + } else if index <= fanout.saturating_add(1).saturating_mul(fanout) { + 2 + } else { + 3 // If changed, update MAX_NUM_TURBINE_HOPS. + } +} + pub fn make_test_cluster( rng: &mut R, num_nodes: usize, @@ -710,7 +693,7 @@ mod tests { T: Copy + Eq + PartialEq + Debug + Hash, { // Map node identities to their index within the shuffled tree. - let index: HashMap<_, _> = nodes + let cache: HashMap<_, _> = nodes .iter() .copied() .enumerate() @@ -720,18 +703,22 @@ mod tests { // Root node's parent is None. assert_eq!(get_retransmit_parent(fanout, /*index:*/ 0, nodes), None); for (k, peers) in peers.into_iter().enumerate() { - assert_eq!( - get_retransmit_peers(fanout, k, nodes).collect::>(), - peers - ); + { + let (index, retransmit_peers) = + get_retransmit_peers(fanout, |node| node == &nodes[k], nodes); + assert_eq!(peers, retransmit_peers.copied().collect::>()); + assert_eq!(index, k); + } let parent = Some(nodes[k]); for peer in peers { - assert_eq!(get_retransmit_parent(fanout, index[&peer], nodes), parent); + assert_eq!(get_retransmit_parent(fanout, cache[&peer], nodes), parent); } } // Remaining nodes have no children. - for k in offset..=nodes.len() { - assert_eq!(get_retransmit_peers(fanout, k, nodes).next(), None); + for k in offset..nodes.len() { + let (index, mut peers) = get_retransmit_peers(fanout, |node| node == &nodes[k], nodes); + assert_eq!(peers.next(), None); + assert_eq!(index, k); } } @@ -860,7 +847,7 @@ mod tests { let mut nodes: Vec<_> = (0..size).collect(); nodes.shuffle(&mut rng); // Map node identities to their index within the shuffled tree. 
- let index: HashMap<_, _> = nodes + let cache: HashMap<_, _> = nodes .iter() .copied() .enumerate() @@ -870,13 +857,16 @@ mod tests { assert_eq!(get_retransmit_parent(fanout, /*index:*/ 0, &nodes), None); for k in 1..size { let parent = get_retransmit_parent(fanout, k, &nodes).unwrap(); - let mut peers = get_retransmit_peers(fanout, index[&parent], &nodes); - assert_eq!(peers.find(|&peer| peer == nodes[k]), Some(nodes[k])); + let (index, mut peers) = get_retransmit_peers(fanout, |node| node == &parent, &nodes); + assert_eq!(index, cache[&parent]); + assert_eq!(peers.find(|&&peer| peer == nodes[k]), Some(&nodes[k])); } for k in 0..size { let parent = Some(nodes[k]); - for peer in get_retransmit_peers(fanout, k, &nodes) { - assert_eq!(get_retransmit_parent(fanout, index[&peer], &nodes), parent); + let (index, peers) = get_retransmit_peers(fanout, |node| node == &nodes[k], &nodes); + assert_eq!(index, k); + for peer in peers { + assert_eq!(get_retransmit_parent(fanout, cache[peer], &nodes), parent); } } } diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 175663bfa9fe2a..59205201bab9b9 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -16,7 +16,7 @@ use { solana_runtime::bank_forks::BankForks, solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_tls_utils::{ - new_dummy_x509_certificate, SkipClientVerification, SkipServerVerification, + new_dummy_x509_certificate, tls_client_config_builder, tls_server_config_builder, }, std::{ cmp::Reverse, @@ -157,9 +157,7 @@ fn new_server_config( cert: CertificateDer<'static>, key: PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ServerConfig::builder() - .with_client_cert_verifier(SkipClientVerification::new()) - .with_single_cert(vec![cert], key)?; + let mut config = tls_server_config_builder().with_single_cert(vec![cert], key)?; config.alpn_protocols = vec![ALPN_TURBINE_PROTOCOL_ID.to_vec()]; config.key_log = Arc::new(KeyLogFile::new()); let quic_server_config = QuicServerConfig::try_from(config) @@ -176,10 +174,7 @@ fn new_client_config( cert: CertificateDer<'static>, key: PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(SkipServerVerification::new()) - .with_client_auth_cert(vec![cert], key)?; + let mut config = tls_client_config_builder().with_client_auth_cert(vec![cert], key)?; config.enable_early_data = true; config.alpn_protocols = vec![ALPN_TURBINE_PROTOCOL_ID.to_vec()]; let mut config = ClientConfig::new(Arc::new(QuicClientConfig::try_from(config).unwrap())); diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index e820851d03e4ba..db9d1f96662a16 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -328,12 +328,12 @@ fn retransmit_shred( ) -> Result<(/*root_distance:*/ usize, /*num_nodes:*/ usize), Error> { let mut compute_turbine_peers = Measure::start("turbine_start"); let data_plane_fanout = cluster_nodes::get_data_plane_fanout(key.slot(), root_bank); - let (root_distance, addrs) = - cluster_nodes.get_retransmit_addrs(slot_leader, key, data_plane_fanout)?; - let addrs: Vec<_> = addrs - .into_iter() - .filter(|addr| socket_addr_space.check(addr)) - .collect(); + let (root_distance, addrs) = cluster_nodes.get_retransmit_addrs( + slot_leader, + key, + data_plane_fanout, + socket_addr_space, + )?; compute_turbine_peers.stop(); stats .compute_turbine_peers_total diff --git a/unified-scheduler-pool/src/lib.rs 
b/unified-scheduler-pool/src/lib.rs index 4a27de9496bb8e..f22cb404d1fe38 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -21,7 +21,6 @@ use { execute_batch, TransactionBatchWithIndexes, TransactionStatusSender, }, solana_runtime::{ - bank::Bank, installed_scheduler_pool::{ initialized_result_with_timings, InstalledScheduler, InstalledSchedulerBox, InstalledSchedulerPool, InstalledSchedulerPoolArc, ResultWithTimings, ScheduleResult, @@ -309,7 +308,10 @@ where // This fn needs to return immediately due to being part of the blocking // `::wait_for_termination()` call. - fn return_scheduler(&self, scheduler: S::Inner, should_trash: bool) { + fn return_scheduler(&self, scheduler: S::Inner) { + // Refer to the comment in is_aborted() as to the exact definition of the concept of + // _trashed_ and the interaction among different parts of unified scheduler. + let should_trash = scheduler.is_trashed(); if should_trash { // Delay drop()-ing this trashed returned scheduler inner by stashing it in // self.trashed_scheduler_inners, which is periodically drained by the `solScCleaner` @@ -411,9 +413,8 @@ pub trait TaskHandler: Send + Sync + Debug + Sized + 'static { fn handle( result: &mut Result<()>, timings: &mut ExecuteTimings, - bank: &Arc, - transaction: &RuntimeTransaction, - index: usize, + scheduling_context: &SchedulingContext, + task: &Task, handler_context: &HandlerContext, ); } @@ -425,13 +426,16 @@ impl TaskHandler for DefaultTaskHandler { fn handle( result: &mut Result<()>, timings: &mut ExecuteTimings, - bank: &Arc, - transaction: &RuntimeTransaction, - index: usize, + scheduling_context: &SchedulingContext, + task: &Task, handler_context: &HandlerContext, ) { // scheduler must properly prevent conflicting tx executions. thus, task handler isn't // responsible for locking. + let bank = scheduling_context.bank(); + let transaction = task.transaction(); + let index = task.task_index(); + let batch = bank.prepare_unlocked_batch_from_single_tx(transaction); let batch_with_indexes = TransactionBatchWithIndexes { batch, @@ -550,7 +554,7 @@ mod chained_channel { pub(super) fn send_chained_channel( &mut self, - context: C, + context: &C, count: usize, ) -> std::result::Result<(), SendError>> { let (chained_sender, chained_receiver) = crossbeam_channel::unbounded(); @@ -712,14 +716,6 @@ where S: SpawnableScheduler, TH: TaskHandler, { - fn id(&self) -> SchedulerId { - self.thread_manager.scheduler_id - } - - fn is_trashed(&self) -> bool { - self.is_aborted() || self.is_overgrown() - } - fn is_aborted(&self) -> bool { // Schedulers can be regarded as being _trashed_ (thereby will be cleaned up later), if // threads are joined. Remember that unified scheduler _doesn't normally join threads_ even @@ -732,11 +728,12 @@ where // Note that this detection is done internally every time scheduler operations are run // (send_task() and end_session(); or schedule_execution() and wait_for_termination() in // terms of InstalledScheduler). So, it's ensured that the detection is done at least once - // for any scheudler which is taken out of the pool. + // for any scheduler which is taken out of the pool. // // Thus, any transaction errors are always handled without loss of information and // the aborted scheduler itself will always be handled as _trashed_ before returning the - // scheduler to the pool, considering is_trashed() is checked immediately before that. + // scheduler to the pool, considering is_aborted() is checked via is_trashed() immediately + // before that. 
self.thread_manager.are_threads_joined() } @@ -770,7 +767,6 @@ impl, TH: TaskHandler> ThreadManager { fn new(pool: Arc>) -> Self { let (new_task_sender, new_task_receiver) = crossbeam_channel::unbounded(); let (session_result_sender, session_result_receiver) = crossbeam_channel::unbounded(); - let handler_count = pool.handler_count; Self { scheduler_id: pool.new_scheduler_id(), @@ -781,12 +777,12 @@ impl, TH: TaskHandler> ThreadManager { session_result_receiver, session_result_with_timings: None, scheduler_thread: None, - handler_threads: Vec::with_capacity(handler_count), + handler_threads: vec![], } } fn execute_task_with_handler( - bank: &Arc, + scheduling_context: &SchedulingContext, executed_task: &mut Box, handler_context: &HandlerContext, ) { @@ -794,9 +790,8 @@ impl, TH: TaskHandler> ThreadManager { TH::handle( &mut executed_task.result_with_timings.0, &mut executed_task.result_with_timings.1, - bank, - executed_task.task.transaction(), - executed_task.task.task_index(), + scheduling_context, + &executed_task.task, handler_context, ); } @@ -1032,7 +1027,7 @@ impl, TH: TaskHandler> ThreadManager { recv(finished_blocked_task_receiver) -> executed_task => { let Some(executed_task) = Self::accumulate_result_with_timings( &mut result_with_timings, - executed_task.expect("alive handler") + executed_task.expect("alive handler"), ) else { break 'nonaborted_main_loop; }; @@ -1072,7 +1067,7 @@ impl, TH: TaskHandler> ThreadManager { recv(finished_idle_task_receiver) -> executed_task => { let Some(executed_task) = Self::accumulate_result_with_timings( &mut result_with_timings, - executed_task.expect("alive handler") + executed_task.expect("alive handler"), ) else { break 'nonaborted_main_loop; }; @@ -1092,26 +1087,28 @@ impl, TH: TaskHandler> ThreadManager { state_machine.reinitialize(); session_ending = false; - // Prepare for the new session. - match new_task_receiver.recv() { - Ok(NewTaskPayload::OpenSubchannel(context_and_result_with_timings)) => { - let (new_context, new_result_with_timings) = - *context_and_result_with_timings; - // We just received subsequent (= not initial) session and about to - // enter into the preceding `while(!is_finished) {...}` loop again. - // Before that, propagate new SchedulingContext to handler threads - runnable_task_sender - .send_chained_channel(new_context, handler_count) - .unwrap(); - result_with_timings = new_result_with_timings; - } - Err(_) => { - // This unusual condition must be triggered by ThreadManager::drop(). - // Initialize result_with_timings with a harmless value... - result_with_timings = initialized_result_with_timings(); - break 'nonaborted_main_loop; + { + // Prepare for the new session. + match new_task_receiver.recv() { + Ok(NewTaskPayload::OpenSubchannel(context_and_result_with_timings)) => { + let (new_context, new_result_with_timings) = + *context_and_result_with_timings; + // We just received subsequent (= not initial) session and about to + // enter into the preceding `while(!is_finished) {...}` loop again. + // Before that, propagate new SchedulingContext to handler threads + runnable_task_sender + .send_chained_channel(&new_context, handler_count) + .unwrap(); + result_with_timings = new_result_with_timings; + } + Err(_) => { + // This unusual condition must be triggered by ThreadManager::drop(). + // Initialize result_with_timings with a harmless value... 
+ result_with_timings = initialized_result_with_timings(); + break 'nonaborted_main_loop; + } + Ok(_) => unreachable!(), } - Ok(_) => unreachable!(), } } @@ -1152,54 +1149,56 @@ impl, TH: TaskHandler> ThreadManager { // 2. Subsequent contexts are propagated explicitly inside `.after_select()` as part of // `select_biased!`, which are sent from `.send_chained_channel()` in the scheduler // thread for all-but-initial sessions. - move || loop { - let (task, sender) = select_biased! { - recv(runnable_task_receiver.for_select()) -> message => { - let Ok(message) = message else { - break; - }; - if let Some(task) = runnable_task_receiver.after_select(message) { - (task, &finished_blocked_task_sender) - } else { - continue; + move || { + loop { + let (task, sender) = select_biased! { + recv(runnable_task_receiver.for_select()) -> message => { + let Ok(message) = message else { + break; + }; + if let Some(task) = runnable_task_receiver.after_select(message) { + (task, &finished_blocked_task_sender) + } else { + continue; + } + }, + recv(runnable_task_receiver.aux_for_select()) -> task => { + if let Ok(task) = task { + (task, &finished_idle_task_sender) + } else { + runnable_task_receiver.never_receive_from_aux(); + continue; + } + }, + }; + defer! { + if !thread::panicking() { + return; } - }, - recv(runnable_task_receiver.aux_for_select()) -> task => { - if let Ok(task) = task { - (task, &finished_idle_task_sender) + + // The scheduler thread can't detect panics in handler threads with + // disconnected channel errors, unless all of them have died. So, send an + // explicit Err promptly. + let current_thread = thread::current(); + error!("handler thread is panicking: {:?}", current_thread); + if sender.send(Err(HandlerPanicked)).is_ok() { + info!("notified a panic from {:?}", current_thread); } else { - runnable_task_receiver.never_receive_from_aux(); - continue; + // It seems that the scheduler thread has been aborted already... + warn!("failed to notify a panic from {:?}", current_thread); } - }, - }; - defer! { - if !thread::panicking() { - return; - } - - // The scheduler thread can't detect panics in handler threads with - // disconnected channel errors, unless all of them has died. So, send an - // explicit Err promptly. - let current_thread = thread::current(); - error!("handler thread is panicking: {:?}", current_thread); - if sender.send(Err(HandlerPanicked)).is_ok() { - info!("notified a panic from {:?}", current_thread); - } else { - // It seems that the scheduler thread has been aborted already...
- warn!("failed to notify a panic from {:?}", current_thread); + let mut task = ExecutedTask::new_boxed(task); + Self::execute_task_with_handler( + runnable_task_receiver.context(), + &mut task, + &pool.handler_context, + ); + if sender.send(Ok(task)).is_err() { + warn!("handler_thread: scheduler thread aborted..."); + break; } } - let mut task = ExecutedTask::new_boxed(task); - Self::execute_task_with_handler( - runnable_task_receiver.context().bank(), - &mut task, - &pool.handler_context, - ); - if sender.send(Ok(task)).is_err() { - warn!("handler_thread: scheduler thread aborted..."); - break; - } } }; @@ -1341,8 +1340,13 @@ impl, TH: TaskHandler> ThreadManager { } } +pub trait SchedulerInner { + fn id(&self) -> SchedulerId; + fn is_trashed(&self) -> bool; +} + pub trait SpawnableScheduler: InstalledScheduler { - type Inner: Debug + Send + Sync; + type Inner: SchedulerInner + Debug + Send + Sync; fn into_inner(self) -> (ResultWithTimings, Self::Inner); @@ -1439,22 +1443,27 @@ impl InstalledScheduler for PooledScheduler { } } +impl SchedulerInner for PooledSchedulerInner +where + S: SpawnableScheduler, + TH: TaskHandler, +{ + fn id(&self) -> SchedulerId { + self.thread_manager.scheduler_id + } + + fn is_trashed(&self) -> bool { + self.is_aborted() || self.is_overgrown() + } +} + impl UninstalledScheduler for PooledSchedulerInner where - S: SpawnableScheduler>, + S: SpawnableScheduler, TH: TaskHandler, { fn return_to_pool(self: Box) { - // Refer to the comment in is_trashed() as to the exact definition of the concept of - // _trashed_ and the interaction among different parts of unified scheduler. - let should_trash = self.is_trashed(); - if should_trash { - info!("trashing scheduler (id: {})...", self.id()); - } - self.thread_manager - .pool - .clone() - .return_scheduler(*self, should_trash); + self.thread_manager.pool.clone().return_scheduler(*self); } } @@ -1752,9 +1761,8 @@ mod tests { fn handle( _result: &mut Result<()>, timings: &mut ExecuteTimings, - _bank: &Arc, - _transaction: &RuntimeTransaction, - _index: usize, + _bank: &SchedulingContext, + _task: &Task, _handler_context: &HandlerContext, ) { timings.metrics[ExecuteTimingType::CheckUs] += 123; @@ -1935,9 +1943,8 @@ mod tests { fn handle( result: &mut Result<()>, _timings: &mut ExecuteTimings, - _bank: &Arc, - _transaction: &RuntimeTransaction, - _index: usize, + _bank: &SchedulingContext, + _task: &Task, _handler_context: &HandlerContext, ) { *result = Err(TransactionError::AccountNotFound); @@ -2046,9 +2053,8 @@ mod tests { fn handle( _result: &mut Result<()>, _timings: &mut ExecuteTimings, - _bank: &Arc, - _transaction: &RuntimeTransaction, - _index: usize, + _bank: &SchedulingContext, + _task: &Task, _handler_context: &HandlerContext, ) { *TASK_COUNT.lock().unwrap() += 1; @@ -2117,10 +2123,10 @@ mod tests { let (result_with_timings, scheduler1) = scheduler1.into_inner(); assert_matches!(result_with_timings, (Ok(()), _)); - pool.return_scheduler(scheduler1, false); + pool.return_scheduler(scheduler1); let (result_with_timings, scheduler2) = scheduler2.into_inner(); assert_matches!(result_with_timings, (Ok(()), _)); - pool.return_scheduler(scheduler2, false); + pool.return_scheduler(scheduler2); let scheduler3 = pool.do_take_scheduler(context.clone()); assert_eq!(scheduler_id2, scheduler3.id()); @@ -2163,7 +2169,7 @@ mod tests { let scheduler = pool.do_take_scheduler(old_context.clone()); let scheduler_id = scheduler.id(); - pool.return_scheduler(scheduler.into_inner().1, false); + 
pool.return_scheduler(scheduler.into_inner().1); let scheduler = pool.take_scheduler(new_context.clone()); assert_eq!(scheduler_id, scheduler.id()); @@ -2383,11 +2389,11 @@ mod tests { fn handle( _result: &mut Result<()>, _timings: &mut ExecuteTimings, - _bank: &Arc, - _transaction: &RuntimeTransaction, - index: usize, + _bank: &SchedulingContext, + task: &Task, _handler_context: &HandlerContext, ) { + let index = task.task_index(); if index == 0 { sleepless_testing::at(PanickingHanlderCheckPoint::BeforeNotifiedPanic); } else if index == 1 { @@ -2463,11 +2469,11 @@ mod tests { fn handle( result: &mut Result<()>, _timings: &mut ExecuteTimings, - _bank: &Arc, - _transaction: &RuntimeTransaction, - index: usize, + _bank: &SchedulingContext, + task: &Task, _handler_context: &HandlerContext, ) { + let index = task.task_index(); *TASK_COUNT.lock().unwrap() += 1; if index == 1 { *result = Err(TransactionError::AccountNotFound); @@ -2532,24 +2538,17 @@ mod tests { fn handle( result: &mut Result<()>, timings: &mut ExecuteTimings, - bank: &Arc, - transaction: &RuntimeTransaction, - index: usize, + bank: &SchedulingContext, + task: &Task, handler_context: &HandlerContext, ) { + let index = task.task_index(); match index { STALLED_TRANSACTION_INDEX => *LOCK_TO_STALL.lock().unwrap(), BLOCKED_TRANSACTION_INDEX => {} _ => unreachable!(), }; - DefaultTaskHandler::handle( - result, - timings, - bank, - transaction, - index, - handler_context, - ); + DefaultTaskHandler::handle(result, timings, bank, task, handler_context); } } @@ -2617,13 +2616,12 @@ mod tests { fn handle( _result: &mut Result<()>, _timings: &mut ExecuteTimings, - bank: &Arc, - _transaction: &RuntimeTransaction, - index: usize, + context: &SchedulingContext, + task: &Task, _handler_context: &HandlerContext, ) { // The task index must always be matched to the slot. 
- assert_eq!(index as Slot, bank.slot()); + assert_eq!(task.task_index() as Slot, context.bank().slot()); } } @@ -2716,7 +2714,6 @@ mod tests { transaction: RuntimeTransaction, index: usize, ) -> ScheduleResult { - let transaction_and_index = (transaction, index); let context = self.context().clone(); let pool = self.3.clone(); @@ -2728,12 +2725,15 @@ mod tests { let mut result = Ok(()); let mut timings = ExecuteTimings::default(); + let task = SchedulingStateMachine::create_task(transaction, index, &mut |_| { + UsageQueue::default() + }); + ::handle( &mut result, &mut timings, - context.bank(), - &transaction_and_index.0, - transaction_and_index.1, + &context, + &task, &pool.handler_context, ); (result, timings) @@ -2768,11 +2768,21 @@ mod tests { } } + impl SchedulerInner for AsyncScheduler { + fn id(&self) -> SchedulerId { + 42 + } + + fn is_trashed(&self) -> bool { + false + } + } + impl UninstalledScheduler for AsyncScheduler { fn return_to_pool(self: Box) { - self.3.clone().return_scheduler(*self, false) + self.3.clone().return_scheduler(*self) } } @@ -2923,6 +2933,7 @@ mod tests { let result = &mut Ok(()); let timings = &mut ExecuteTimings::default(); let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let scheduling_context = &SchedulingContext::new(bank.clone()); let handler_context = &HandlerContext { log_messages_bytes_limit: None, transaction_status_sender: None, @@ -2930,7 +2941,8 @@ mod tests { prioritization_fee_cache, }; - DefaultTaskHandler::handle(result, timings, bank, &tx, 0, handler_context); + let task = SchedulingStateMachine::create_task(tx, 0, &mut |_| UsageQueue::default()); + DefaultTaskHandler::handle(result, timings, scheduling_context, &task, handler_context); assert_matches!(result, Err(TransactionError::AccountLoadedTwice)); } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index aed7a3bffc1d9f..1cdaf02b49dc24 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -959,6 +959,27 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_threads) .help("Number of threads to use for servicing RPC requests"), ) + .arg( + Arg::with_name("rpc_blocking_threads") + .long("rpc-blocking-threads") + .value_name("NUMBER") + .validator(is_parsable::) + .validator(|value| { + value + .parse::() + .map_err(|err| format!("error parsing '{value}': {err}")) + .and_then(|threads| { + if threads > 0 { + Ok(()) + } else { + Err("value must be >= 1".to_string()) + } + }) + }) + .takes_value(true) + .default_value(&default_args.rpc_blocking_threads) + .help("Number of blocking threads to use for servicing CPU bound RPC requests (eg getMultipleAccounts)"), + ) .arg( Arg::with_name("rpc_niceness_adj") .long("rpc-niceness-adjustment") @@ -2270,6 +2291,7 @@ pub struct DefaultArgs { pub rpc_send_transaction_batch_size: String, pub rpc_send_transaction_retry_pool_max_size: String, pub rpc_threads: String, + pub rpc_blocking_threads: String, pub rpc_niceness_adjustment: String, pub rpc_bigtable_timeout: String, pub rpc_bigtable_instance_name: String, @@ -2362,6 +2384,7 @@ impl DefaultArgs { .retry_pool_max_size .to_string(), rpc_threads: num_cpus::get().to_string(), + rpc_blocking_threads: 1.max(num_cpus::get() / 4).to_string(), rpc_niceness_adjustment: "0".to_string(), rpc_bigtable_timeout: "30".to_string(), rpc_bigtable_instance_name: solana_storage_bigtable::DEFAULT_INSTANCE_NAME.to_string(), diff --git a/validator/src/main.rs b/validator/src/main.rs index 
a7de615b3be9ac..cecf9f873d3019 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1492,6 +1492,7 @@ pub fn main() { ), disable_health_check: false, rpc_threads: value_t_or_exit!(matches, "rpc_threads", usize), + rpc_blocking_threads: value_t_or_exit!(matches, "rpc_blocking_threads", usize), rpc_niceness_adj: value_t_or_exit!(matches, "rpc_niceness_adj", i8), account_indexes: account_indexes.clone(), rpc_scan_and_fix_roots: matches.is_present("rpc_scan_and_fix_roots"), diff --git a/vortexor/Cargo.toml b/vortexor/Cargo.toml new file mode 100644 index 00000000000000..f87a457269ce25 --- /dev/null +++ b/vortexor/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "solana-vortexor" +description = "Solana TPU Vortexor" +documentation = "https://docs.rs/solana-vortexor" +default-run = "solana-vortexor" +publish = false +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +async-channel = { workspace = true } +bytes = { workspace = true } +clap = { workspace = true } +crossbeam-channel = { workspace = true } +dashmap = { workspace = true } +futures = { workspace = true } +futures-util = { workspace = true } +governor = { workspace = true } +histogram = { workspace = true } +indexmap = { workspace = true } +itertools = { workspace = true } +libc = { workspace = true } +log = { workspace = true } +nix = { workspace = true, features = ["net"] } +pem = { workspace = true } +percentage = { workspace = true } +quinn = { workspace = true } +quinn-proto = { workspace = true } +rand = { workspace = true } +rustls = { workspace = true } +smallvec = { workspace = true } +socket2 = { workspace = true } +solana-clap-utils = { workspace = true } +solana-measure = { workspace = true } +solana-metrics = { workspace = true } +solana-net-utils = { workspace = true } +solana-perf = { workspace = true } +solana-sdk = { workspace = true } +solana-streamer = { workspace = true } +solana-transaction-metrics-tracker = { workspace = true } +solana-version = { workspace = true } + +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } +x509-parser = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +solana-logger = { workspace = true } +solana-streamer = { workspace = true, features = ["dev-context-only-utils"] } + +[lib] +crate-type = ["lib"] +name = "solana_vortexor" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/vortexor/Readme.md b/vortexor/Readme.md new file mode 100644 index 00000000000000..f12462d9019e0d --- /dev/null +++ b/vortexor/Readme.md @@ -0,0 +1,144 @@ +# Introduction +The Vortexor is a service that offloads from the core validator the work of +receiving transactions from the public, verifying their signatures, and +deduplicating them, enabling the validator to focus on processing and +executing transactions. The verified and filtered transactions are then +forwarded to the validators linked with the Vortexor. This setup makes TPU +transaction ingestion and verification more scalable than a single-node +solution. + +# Architecture +Figure 1 shows the architecture of the Vortexor and its +relationship with the validator.
+ + +---------------------+ + | Solana | + | RPC / Web Socket | + | Service | + +---------------------+ + | + v + +--------------------- VORTEXOR ------------------------+ + | | | + | +------------------+ | + | | StakedKeyUpdater | | + | +------------------+ | + | | | + | v | + | +-------------+ +--------------------+ | + TPU --> | | TPU Streamer| -----> | SigVerifier/Dedup | | + /QUIC | +-------------+ +--------------------+ | + | | | | + | v v | + | +----------------+ +------------------------+ | + | | Subscription |<----| VerifiedPacketForwarder| | + | | Management | +------------------------+ | + | +----------------+ | | + +--------------------------------|----------------------+ + ^ | (UDP/QUIC) + Heartbeat/subscriptions | | + | v + +-------------------- AGAVE VALIDATOR ------------------+ + | | + | +----------------+ +-----------------------+ | + Config-> | | Subscription | | VerifiedPacketReceiver| | + Admin RPC | | Management | | | | + | +----------------+ +-----------------------+ | + | | | | + | | v | + | v +-----------+ | + | +--------------------+ | Banking | | + Gossip <--------|--| Gossip/Contact Info| | Stage | | + | +--------------------+ +-----------+ | + +-------------------------------------------------------+ + + Figure 1. + +The Vortexor is a new executable that can be deployed on nodes separate from +the core Agave validator. It can also be deployed on the same node as the core +validator if the node has sufficient performance bandwidth. + +It has the following major components: + +1. **The TPU Streamer** – This is built from the existing QUIC-based TPU streamer. +2. **The SigVerify/Dedup** – This is refactored from the existing SigVerify component. +3. **Subscription Management** – Responsible for managing subscriptions + from the validator. Actions include subscribing to transactions and canceling subscriptions. +4. **VerifiedPacketForwarder** – Responsible for forwarding verified + transaction packets to subscribed validators. It uses UDP/QUIC to send transactions. + Validators can bind to private addresses for receiving the verified packets. + Firewalls can also restrict transactions to the chosen Vortexor. +5. **The Vortexor StakedKeyUpdater** – Retrieves the stake map from the network and makes + it available to the TPU streamer for stake-weighted QoS. + +Validators include a new component that receives verified packets sent from +the Vortexor and directly sends them to the banking stage. The validator's +Admin RPC is enhanced to configure peering with the Vortexor. The ContactInfo of +the validator updates with the Vortexor's address when linked. + +# Relationship of Validator and Vortexor +The validator broadcasts one TPU address served by a Vortexor. A validator can +switch its paired Vortexor to another. A Vortexor, depending on its performance, +can serve one or more validators. The architecture also supports multiple +Vortexors sharing the TPU address behind a load balancer for scalability: + + Load Balancer + | + v + __________________________ + | | | + | | | + Vortexor Vortexor Vortexor + | | | + | | | + __________________________ + | + v + Validator + + Figure 2. + +When the validator is in 'Paired' mode, receiving active transactions or +heartbeat messages from the Vortexor, it receives TPU transactions solely from +the Vortexor. It publishes the TPU address via gossip. The regular TPU and TPU +forward services are disabled for security and performance reasons. 
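+
+As a rough sketch of this paired-mode tracking and the heartbeat-timeout
+fallback discussed below (the names and the timeout value here are
+hypothetical, not taken from the implementation):
+
+```rust
+use std::time::{Duration, Instant};
+
+// Assumed value; the real timeout is configurable.
+const FALLBACK_TIMEOUT: Duration = Duration::from_secs(5);
+
+enum TpuSource {
+    PairedVortexor,  // receive TPU traffic solely from the Vortexor
+    BuiltInStreamer, // fall back to the validator's own TPU streamer
+}
+
+struct VortexorLink {
+    last_activity: Instant,
+    source: TpuSource,
+}
+
+impl VortexorLink {
+    // Called on every heartbeat or verified packet batch from the Vortexor.
+    fn on_vortexor_activity(&mut self) {
+        self.last_activity = Instant::now();
+        self.source = TpuSource::PairedVortexor;
+    }
+
+    // Called periodically; reverts to the built-in streamer on timeout.
+    fn check_timeout(&mut self) {
+        if self.last_activity.elapsed() > FALLBACK_TIMEOUT {
+            self.source = TpuSource::BuiltInStreamer;
+        }
+    }
+}
+```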
+ +The design assumes a trust relationship between the Vortexor and the validator, +achieved through a private network, firewall rules, or TLS verification. QUIC, +used for the VerifiedPacketReceiver, supports QoS to prioritize Vortexor traffic. + +Heartbeat messages from the Vortexor inform the validator of its status. If no +transactions or heartbeats are received within a configurable timeout, the +validator may switch to another Vortexor or revert to its built-in TPU streamer. + +# Deployment Considerations +Using a Vortexor enhances validator scalability but introduces complexities: + +1. **Deployment Complexity**: For validators not using a Vortexor, there is no + impact. For those using a Vortexor, additional setup is required. To minimize + complexity, the Vortexor and validator require minimal configuration changes + and provide clear documentation for pairing. Automatic fallback ensures + continued operation if the connection between the Vortexor and validator + breaks. + +2. **Latency**: An additional hop exists between the original client and the + leader validator. Latency is minimized by deploying the Vortexor on a node + with low-latency connections to the validator. UDP forwarding is supported + for speed. + +3. **Security**: The implicit trust between the validator and Vortexor is + safeguarded by private networks, firewalls, and QUIC with public key-based + rules. Validators can optionally enforce re-verification of transactions. + +4. **Compatibility**: The solution is compatible with existing setups, such as + jito-relayers. The Vortexor CLI mimics jito-relayer's CLI to reduce friction + for migrating users. + +5. **Networking**: The Vortexor can be exposed directly to the internet or + placed behind a load balancer. Communication with the validator is + encouraged via private networks for security and performance. + +# Upgrade Considerations +Operators can decide whether to adopt Vortexors without concerns about network +protocol changes. Upgrading involves specifying the Vortexor's TPU address and +verified packet receiver network address via CLI or Admin RPC. The transition is +designed to be seamless for operators. 
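+
+For orientation, a minimal sketch of embedding the Vortexor through the
+library API introduced in this patch. The port range and connection limits
+are example values only, and in a real deployment the receivers would feed
+the sigverify/forwarding pipeline rather than being dropped:
+
+```rust
+use {
+    crossbeam_channel::unbounded,
+    solana_sdk::{net::DEFAULT_TPU_COALESCE, signature::Keypair},
+    solana_streamer::streamer::StakedNodes,
+    solana_vortexor::vortexor::Vortexor,
+    std::sync::{atomic::AtomicBool, Arc, RwLock},
+};
+
+fn run_vortexor() {
+    let identity_keypair = Keypair::new();
+    let exit = Arc::new(AtomicBool::new(false));
+    // These receivers would be wired to sigverify and the forwarder.
+    let (tpu_sender, _tpu_receiver) = unbounded();
+    let (tpu_fwd_sender, _tpu_fwd_receiver) = unbounded();
+
+    // Bind the TPU and TPU-forward QUIC sockets.
+    let tpu_sockets = Vortexor::create_tpu_sockets(
+        "0.0.0.0".parse().unwrap(),
+        (8000, 10_000), // dynamic port range (example)
+        8,              // num_quic_endpoints (example)
+    );
+    let staked_nodes = Arc::new(RwLock::new(StakedNodes::default()));
+
+    let vortexor = Vortexor::create_vortexor(
+        tpu_sockets,
+        staked_nodes,
+        tpu_sender,
+        tpu_fwd_sender,
+        8,    // max_connections_per_peer (example)
+        2000, // max_tpu_staked_connections (example)
+        500,  // max_tpu_unstaked_connections (example)
+        2500, // max_fwd_staked_connections (example)
+        0,    // max_fwd_unstaked_connections
+        250,  // max_streams_per_ms (example)
+        8,    // max_connections_per_ipaddr_per_min (example)
+        DEFAULT_TPU_COALESCE,
+        &identity_keypair,
+        exit,
+    );
+    vortexor.join().unwrap();
+}
+```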
diff --git a/vortexor/src/cli.rs b/vortexor/src/cli.rs new file mode 100644 index 00000000000000..f813fb8e01bb1d --- /dev/null +++ b/vortexor/src/cli.rs @@ -0,0 +1,177 @@ +use { + clap::{crate_description, crate_name, App, AppSettings, Arg}, + solana_clap_utils::input_validators::{is_keypair_or_ask_keyword, is_parsable}, + solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, + solana_sdk::quic::QUIC_PORT_OFFSET, + solana_streamer::{ + nonblocking::quic::{ + DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, DEFAULT_MAX_STREAMS_PER_MS, + }, + quic::{MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, + }, +}; + +pub const DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER: usize = 8; +pub const DEFAULT_NUM_QUIC_ENDPOINTS: usize = 8; + +pub struct DefaultArgs { + pub bind_address: String, + pub dynamic_port_range: String, + pub max_connections_per_peer: String, + pub max_tpu_staked_connections: String, + pub max_tpu_unstaked_connections: String, + pub max_fwd_staked_connections: String, + pub max_fwd_unstaked_connections: String, + pub max_streams_per_ms: String, + pub max_connections_per_ipaddr_per_min: String, + pub num_quic_endpoints: String, +} + +impl Default for DefaultArgs { + fn default() -> Self { + Self { + bind_address: "0.0.0.0".to_string(), + dynamic_port_range: format!("{}-{}", VALIDATOR_PORT_RANGE.0, VALIDATOR_PORT_RANGE.1), + max_connections_per_peer: DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER.to_string(), + max_tpu_staked_connections: MAX_STAKED_CONNECTIONS.to_string(), + max_tpu_unstaked_connections: MAX_UNSTAKED_CONNECTIONS.to_string(), + max_fwd_staked_connections: MAX_STAKED_CONNECTIONS + .saturating_add(MAX_UNSTAKED_CONNECTIONS) + .to_string(), + max_fwd_unstaked_connections: 0.to_string(), + max_streams_per_ms: DEFAULT_MAX_STREAMS_PER_MS.to_string(), + max_connections_per_ipaddr_per_min: DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE + .to_string(), + num_quic_endpoints: DEFAULT_NUM_QUIC_ENDPOINTS.to_string(), + } + } +} + +fn port_range_validator(port_range: String) -> Result<(), String> { + if let Some((start, end)) = solana_net_utils::parse_port_range(&port_range) { + if end.saturating_sub(start) < MINIMUM_VALIDATOR_PORT_RANGE_WIDTH { + Err(format!( + "Port range is too small. 
Try --dynamic-port-range {}-{}", + start, + start.saturating_add(MINIMUM_VALIDATOR_PORT_RANGE_WIDTH) + )) + } else if end.checked_add(QUIC_PORT_OFFSET).is_none() { + Err("Invalid dynamic_port_range.".to_string()) + } else { + Ok(()) + } + } else { + Err("Invalid port range".to_string()) + } +} + +pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { + return App::new(crate_name!()) + .about(crate_description!()) + .version(version) + .global_setting(AppSettings::ColoredHelp) + .global_setting(AppSettings::InferSubcommands) + .global_setting(AppSettings::UnifiedHelpMessage) + .global_setting(AppSettings::VersionlessSubcommands) + .arg( + Arg::with_name("identity") + .short("i") + .long("identity") + .value_name("KEYPAIR") + .takes_value(true) + .validator(is_keypair_or_ask_keyword) + .help("Vortexor identity keypair"), + ) + .arg( + Arg::with_name("bind_address") + .long("bind-address") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host) + .default_value(&default_args.bind_address) + .help("IP address to bind the validator ports"), + ) + .arg( + Arg::with_name("dynamic_port_range") + .long("dynamic-port-range") + .value_name("MIN_PORT-MAX_PORT") + .takes_value(true) + .default_value(&default_args.dynamic_port_range) + .validator(port_range_validator) + .help("Range to use for dynamically assigned ports"), + ) + .arg( + Arg::with_name("max_connections_per_peer") + .long("max-connections-per-peer") + .takes_value(true) + .default_value(&default_args.max_connections_per_peer) + .validator(is_parsable::) + .help("Controls the max concurrent connections per IpAddr."), + ) + .arg( + Arg::with_name("max_tpu_staked_connections") + .long("max-tpu-staked-connections") + .takes_value(true) + .default_value(&default_args.max_tpu_staked_connections) + .validator(is_parsable::) + .help("Controls the max concurrent connections for TPU from staked nodes."), + ) + .arg( + Arg::with_name("max_tpu_unstaked_connections") + .long("max-tpu-unstaked-connections") + .takes_value(true) + .default_value(&default_args.max_tpu_unstaked_connections) + .validator(is_parsable::) + .help("Controls the max concurrent connections fort TPU from unstaked nodes."), + ) + .arg( + Arg::with_name("max_fwd_staked_connections") + .long("max-fwd-staked-connections") + .takes_value(true) + .default_value(&default_args.max_fwd_staked_connections) + .validator(is_parsable::) + .help("Controls the max concurrent connections for TPU-forward from staked nodes."), + ) + .arg( + Arg::with_name("max_fwd_unstaked_connections") + .long("max-fwd-unstaked-connections") + .takes_value(true) + .default_value(&default_args.max_fwd_unstaked_connections) + .validator(is_parsable::) + .help("Controls the max concurrent connections for TPU-forward from unstaked nodes."), + ) + .arg( + Arg::with_name("max_connections_per_ipaddr_per_minute") + .long("max-connections-per-ipaddr-per-minute") + .takes_value(true) + .default_value(&default_args.max_connections_per_ipaddr_per_min) + .validator(is_parsable::) + .help("Controls the rate of the clients connections per IpAddr per minute."), + ) + .arg( + Arg::with_name("num_quic_endpoints") + .long("num-quic-endpoints") + .takes_value(true) + .default_value(&default_args.num_quic_endpoints) + .validator(is_parsable::) + .help("The number of QUIC endpoints used for TPU and TPU-Forward. 
It can be increased to \ + increase network ingest throughput, at the expense of higher CPU and general \ + validator load."), + ) + .arg( + Arg::with_name("max_streams_per_ms") + .long("max-streams-per-ms") + .takes_value(true) + .default_value(&default_args.max_streams_per_ms) + .validator(is_parsable::<u64>) + .help("Max streams per millisecond for a streamer."), + ) + .arg( + Arg::with_name("tpu_coalesce_ms") + .long("tpu-coalesce-ms") + .value_name("MILLISECS") + .takes_value(true) + .validator(is_parsable::<u64>) + .help("Milliseconds to wait in the TPU receiver for packet coalescing."), + ); +} diff --git a/vortexor/src/lib.rs b/vortexor/src/lib.rs new file mode 100644 index 00000000000000..e92563196fb2d4 --- /dev/null +++ b/vortexor/src/lib.rs @@ -0,0 +1,2 @@ +pub mod cli; +pub mod vortexor; diff --git a/vortexor/src/main.rs b/vortexor/src/main.rs new file mode 100644 index 00000000000000..44f79b262f0113 --- /dev/null +++ b/vortexor/src/main.rs @@ -0,0 +1,82 @@ +use { + clap::{value_t, value_t_or_exit}, + crossbeam_channel::unbounded, + solana_clap_utils::input_parsers::keypair_of, + solana_sdk::net::DEFAULT_TPU_COALESCE, + solana_streamer::streamer::StakedNodes, + solana_vortexor::{ + cli::{app, DefaultArgs}, + vortexor::Vortexor, + }, + std::{ + sync::{atomic::AtomicBool, Arc, RwLock}, + time::Duration, + }, +}; + +pub fn main() { + let default_args = DefaultArgs::default(); + let solana_version = solana_version::version!(); + let cli_app = app(solana_version, &default_args); + let matches = cli_app.get_matches(); + + let identity_keypair = keypair_of(&matches, "identity").unwrap_or_else(|| { + clap::Error::with_description( + "The --identity argument is required", + clap::ErrorKind::ArgumentNotFound, + ) + .exit(); + }); + + let bind_address = solana_net_utils::parse_host(matches.value_of("bind_address").unwrap()) + .expect("invalid bind_address"); + let max_connections_per_peer = value_t_or_exit!(matches, "max_connections_per_peer", u64); + let max_tpu_staked_connections = value_t_or_exit!(matches, "max_tpu_staked_connections", u64); + let max_fwd_staked_connections = value_t_or_exit!(matches, "max_fwd_staked_connections", u64); + let max_fwd_unstaked_connections = + value_t_or_exit!(matches, "max_fwd_unstaked_connections", u64); + + let max_tpu_unstaked_connections = + value_t_or_exit!(matches, "max_tpu_unstaked_connections", u64); + + let max_connections_per_ipaddr_per_min = + value_t_or_exit!(matches, "max_connections_per_ipaddr_per_minute", u64); + let num_quic_endpoints = value_t_or_exit!(matches, "num_quic_endpoints", u64); + let tpu_coalesce = value_t!(matches, "tpu_coalesce_ms", u64) + .map(Duration::from_millis) + .unwrap_or(DEFAULT_TPU_COALESCE); + + let dynamic_port_range = + solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap()) + .expect("invalid dynamic_port_range"); + + let max_streams_per_ms = value_t_or_exit!(matches, "max_streams_per_ms", u64); + let exit = Arc::new(AtomicBool::new(false)); + // To be linked with the Tpu sigverify and forwarder service + let (tpu_sender, _tpu_receiver) = unbounded(); + let (tpu_fwd_sender, _tpu_fwd_receiver) = unbounded(); + + let tpu_sockets = + Vortexor::create_tpu_sockets(bind_address, dynamic_port_range, num_quic_endpoints); + + // To be linked with StakedNodes service.
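+ // StakedNodes::default() starts with an empty stake map, so every peer is
+ // treated as unstaked until real stake weights are wired in.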
+ let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + + let vortexor = Vortexor::create_vortexor( + tpu_sockets, + staked_nodes, + tpu_sender, + tpu_fwd_sender, + max_connections_per_peer, + max_tpu_staked_connections, + max_tpu_unstaked_connections, + max_fwd_staked_connections, + max_fwd_unstaked_connections, + max_streams_per_ms, + max_connections_per_ipaddr_per_min, + tpu_coalesce, + &identity_keypair, + exit, + ); + vortexor.join().unwrap(); +} diff --git a/vortexor/src/vortexor.rs b/vortexor/src/vortexor.rs new file mode 100644 index 00000000000000..8a86d15ecec055 --- /dev/null +++ b/vortexor/src/vortexor.rs @@ -0,0 +1,171 @@ +use { + crossbeam_channel::Sender, + solana_net_utils::{bind_in_range_with_config, bind_more_with_config, SocketConfig}, + solana_perf::packet::PacketBatch, + solana_sdk::{quic::NotifyKeyUpdate, signature::Keypair}, + solana_streamer::{ + nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, + quic::{spawn_server_multi, EndpointKeyUpdater, QuicServerParams}, + streamer::StakedNodes, + }, + std::{ + net::UdpSocket, + sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + thread::{self, JoinHandle}, + time::Duration, + }, +}; + +pub struct TpuSockets { + pub tpu_quic: Vec, + pub tpu_quic_fwd: Vec, +} + +pub struct Vortexor { + thread_handles: Vec>, + key_update_notifier: Arc, +} + +struct KeyUpdateNotifier { + key_updaters: Mutex>>, +} + +impl KeyUpdateNotifier { + fn new(key_updaters: Vec>) -> Self { + Self { + key_updaters: Mutex::new(key_updaters), + } + } +} + +impl NotifyKeyUpdate for KeyUpdateNotifier { + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + let updaters = self.key_updaters.lock().unwrap(); + for updater in updaters.iter() { + updater.update_key(key)? + } + Ok(()) + } +} + +impl Vortexor { + pub fn create_tpu_sockets( + bind_address: std::net::IpAddr, + dynamic_port_range: (u16, u16), + num_quic_endpoints: u64, + ) -> TpuSockets { + let quic_config = SocketConfig { reuseport: true }; + + let (_, tpu_quic) = + bind_in_range_with_config(bind_address, dynamic_port_range, quic_config.clone()) + .expect("expected bind to succeed"); + + let tpu_quic_port = tpu_quic.local_addr().unwrap().port(); + let tpu_quic = bind_more_with_config( + tpu_quic, + num_quic_endpoints.try_into().unwrap(), + quic_config.clone(), + ) + .unwrap(); + + let (_, tpu_quic_fwd) = bind_in_range_with_config( + bind_address, + (tpu_quic_port.saturating_add(1), dynamic_port_range.1), + quic_config.clone(), + ) + .expect("expected bind to succeed"); + + let tpu_quic_fwd = bind_more_with_config( + tpu_quic_fwd, + num_quic_endpoints.try_into().unwrap(), + quic_config, + ) + .unwrap(); + + TpuSockets { + tpu_quic, + tpu_quic_fwd, + } + } + + #[allow(clippy::too_many_arguments)] + pub fn create_vortexor( + tpu_sockets: TpuSockets, + staked_nodes: Arc>, + tpu_sender: Sender, + tpu_fwd_sender: Sender, + max_connections_per_peer: u64, + max_tpu_staked_connections: u64, + max_tpu_unstaked_connections: u64, + max_fwd_staked_connections: u64, + max_fwd_unstaked_connections: u64, + max_streams_per_ms: u64, + max_connections_per_ipaddr_per_min: u64, + tpu_coalesce: Duration, + identity_keypair: &Keypair, + exit: Arc, + ) -> Self { + let mut quic_server_params = QuicServerParams { + max_connections_per_peer: max_connections_per_peer.try_into().unwrap(), + max_staked_connections: max_tpu_staked_connections.try_into().unwrap(), + max_unstaked_connections: max_tpu_unstaked_connections.try_into().unwrap(), + max_streams_per_ms, + max_connections_per_ipaddr_per_min, + 
wait_for_chunk_timeout: DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, + coalesce: tpu_coalesce, + }; + + let TpuSockets { + tpu_quic, + tpu_quic_fwd, + } = tpu_sockets; + + let tpu_result = spawn_server_multi( + "solVtxTpu", + "quic_vortexor_tpu", + tpu_quic, + identity_keypair, + tpu_sender.clone(), + exit.clone(), + staked_nodes.clone(), + quic_server_params.clone(), + ) + .unwrap(); + + // For TPU forward -- we disallow unstaked connections. Allocate all connection resources + // for staked connections: + quic_server_params.max_staked_connections = max_fwd_staked_connections.try_into().unwrap(); + quic_server_params.max_unstaked_connections = + max_fwd_unstaked_connections.try_into().unwrap(); + let tpu_fwd_result = spawn_server_multi( + "solVtxTpuFwd", + "quic_vortexor_tpu_forwards", + tpu_quic_fwd, + identity_keypair, + tpu_fwd_sender, + exit.clone(), + staked_nodes.clone(), + quic_server_params, + ) + .unwrap(); + + Self { + thread_handles: vec![tpu_result.thread, tpu_fwd_result.thread], + key_update_notifier: Arc::new(KeyUpdateNotifier::new(vec![ + tpu_result.key_updater, + tpu_fwd_result.key_updater, + ])), + } + } + + pub fn get_key_update_notifier(&self) -> Arc<dyn NotifyKeyUpdate> { + self.key_update_notifier.clone() + } + + pub fn join(self) -> thread::Result<()> { + for t in self.thread_handles { + t.join()? + } + Ok(()) + } +} diff --git a/vortexor/tests/vortexor.rs b/vortexor/tests/vortexor.rs new file mode 100644 index 00000000000000..8d2f683c9e2006 --- /dev/null +++ b/vortexor/tests/vortexor.rs @@ -0,0 +1,76 @@ +use { + crossbeam_channel::unbounded, + solana_net_utils::VALIDATOR_PORT_RANGE, + solana_sdk::{net::DEFAULT_TPU_COALESCE, pubkey::Pubkey, signature::Keypair, signer::Signer}, + solana_streamer::{ + nonblocking::{ + quic::{DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, DEFAULT_MAX_STREAMS_PER_MS}, + testing_utilities::check_multiple_streams, + }, + quic::{MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, + streamer::StakedNodes, + }, + solana_vortexor::{ + cli::{DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER, DEFAULT_NUM_QUIC_ENDPOINTS}, + vortexor::Vortexor, + }, + std::{ + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, + }, +}; + +#[tokio::test(flavor = "multi_thread")] +async fn test_vortexor() { + solana_logger::setup(); + + let bind_address = solana_net_utils::parse_host("127.0.0.1").expect("invalid bind_address"); + let keypair = Keypair::new(); + let exit = Arc::new(AtomicBool::new(false)); + + let (tpu_sender, tpu_receiver) = unbounded(); + let (tpu_fwd_sender, tpu_fwd_receiver) = unbounded(); + let tpu_sockets = Vortexor::create_tpu_sockets( + bind_address, + VALIDATOR_PORT_RANGE, + DEFAULT_NUM_QUIC_ENDPOINTS.try_into().unwrap(), + ); + + let tpu_address = tpu_sockets.tpu_quic[0].local_addr().unwrap(); + let tpu_fwd_address = tpu_sockets.tpu_quic_fwd[0].local_addr().unwrap(); + + let stakes = HashMap::from([(keypair.pubkey(), 10000)]); + let staked_nodes = Arc::new(RwLock::new(StakedNodes::new( + Arc::new(stakes), + HashMap::<Pubkey, u64>::default(), // overrides + ))); + + let vortexor = Vortexor::create_vortexor( + tpu_sockets, + staked_nodes, + tpu_sender, + tpu_fwd_sender, + DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER.try_into().unwrap(), + MAX_STAKED_CONNECTIONS.try_into().unwrap(), + MAX_UNSTAKED_CONNECTIONS.try_into().unwrap(), + MAX_STAKED_CONNECTIONS + .saturating_add(MAX_UNSTAKED_CONNECTIONS) + .try_into() + .unwrap(), // max_fwd_staked_connections + 0, // max_fwd_unstaked_connections + DEFAULT_MAX_STREAMS_PER_MS, + DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, +
DEFAULT_TPU_COALESCE, + &keypair, + exit.clone(), + ); + + check_multiple_streams(tpu_receiver, tpu_address, Some(&keypair)).await; + check_multiple_streams(tpu_fwd_receiver, tpu_fwd_address, Some(&keypair)).await; + + exit.store(true, Ordering::Relaxed); + vortexor.join().unwrap(); +}