diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ef93826e9..c462ed3a7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ concurrency: env: nightly_toolchain: nightly-2023-06-12 - stable_toolchain: 1.77 + stable_toolchain: 1.82 CARGO_HTTP_MULTIPLEXING: false CARGO_TERM_COLOR: always TARI_TARGET_NETWORK: localnet diff --git a/Cargo.lock b/Cargo.lock index 3a4d83746..4c85d46a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -247,7 +247,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -263,7 +263,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -404,7 +404,7 @@ dependencies = [ "serde_urlencoded", "static_assertions", "tempfile", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -437,7 +437,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -794,7 +794,7 @@ dependencies = [ "axum 0.6.20", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -830,7 +830,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978e81a45367d2409ecd33369a45dda2e9a3ca516153ec194de1fbda4b9fb79d" dependencies = [ - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -1171,7 +1171,7 @@ dependencies = [ "sha2", "ssri", "tempfile", - "thiserror", + "thiserror 1.0.68", "walkdir", ] @@ -1577,16 +1577,15 @@ dependencies = [ name = "consensus_tests" version = "0.7.0" dependencies = [ - "anyhow", "async-trait", "fern", "futures 0.3.31", "humantime 2.1.0", - "indexmap 2.6.0", "itertools 0.11.0", "log", "rand", "serde", + "serde_json", "tari_bor", "tari_common", "tari_common_types", @@ -1597,12 +1596,11 @@ dependencies = [ "tari_dan_storage", "tari_engine_types", "tari_epoch_manager", - "tari_mmr", "tari_shutdown", + "tari_sidechain", "tari_state_store_sqlite", "tari_template_lib", "tari_transaction", - "thiserror", "tokio", ] @@ -2125,7 +2123,7 @@ dependencies = [ "anyhow", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -2417,15 +2415,15 @@ dependencies = [ "console", "shell-words", "tempfile", - "thiserror", + "thiserror 1.0.68", "zeroize", ] [[package]] name = "diesel" -version = "2.2.4" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "158fe8e2e68695bd615d7e4f3227c0727b151330d3e253b525086c348d055d5e" +checksum = "ccf1bedf64cdb9643204a36dd15b19a6ce8e7aa7f7b105868e9f1fad5ffa7d12" dependencies = [ "bigdecimal", "chrono", @@ -2907,6 +2905,18 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "filetime" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -2987,6 +2997,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + 
[[package]] name = "funty" version = "2.0.0" @@ -3227,7 +3246,7 @@ dependencies = [ "serde_json", "syn 2.0.87", "textwrap 0.16.1", - "thiserror", + "thiserror 1.0.68", "typed-builder", ] @@ -3250,9 +3269,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git2" -version = "0.18.3" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" +checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" dependencies = [ "bitflags 2.6.0", "libc", @@ -3369,7 +3388,7 @@ dependencies = [ "pest_derive", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -3540,7 +3559,7 @@ dependencies = [ "once_cell", "radix_trie", "rand", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -3563,7 +3582,7 @@ dependencies = [ "once_cell", "rand", "socket2", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tokio", "tracing", @@ -3592,7 +3611,7 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.16", "rustls-pemfile 2.2.0", - "thiserror", + "thiserror 1.0.68", "time", "tinyvec", "tokio", @@ -3617,7 +3636,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -3638,7 +3657,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -4299,6 +4318,26 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a257582fdcde896fd96463bf2d40eefea0580021c0712a0e2b028b60b47a837a" +[[package]] +name = "inotify" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd168d97690d0b8c412d6b6c10360277f4d7ee495c5d0d5d5fe0854923255cc" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "inout" version = "0.1.3" @@ -4343,6 +4382,7 @@ dependencies = [ "minotari_node", "minotari_node_grpc_client", "minotari_wallet", + "notify", "rand", "regex", "reqwest", @@ -4367,6 +4407,7 @@ dependencies = [ "tari_key_manager", "tari_p2p", "tari_shutdown", + "tari_sidechain", "tari_template_builtin", "tari_template_lib", "tari_transaction", @@ -4392,7 +4433,7 @@ dependencies = [ "rand", "rtcp", "rtp 0.9.0", - "thiserror", + "thiserror 1.0.68", "tokio", "waitgroup", "webrtc-srtp", @@ -4586,6 +4627,26 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "kqueue" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "kstring" version = "2.0.2" @@ -4716,7 +4777,7 @@ dependencies = [ "ledger-transport 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc", "log", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4733,9 +4794,9 @@ checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = 
"libgit2-sys" -version = "0.16.2+1.7.2" +version = "0.17.0+1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" +checksum = "10472326a8a6477c3c20a64547b0059e4b0d086869eee31e6d7da728a8eb7224" dependencies = [ "cc", "libc", @@ -4801,7 +4862,7 @@ dependencies = [ "multiaddr 0.18.1", "pin-project 1.1.7", "rw-stream-sink", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4835,7 +4896,7 @@ dependencies = [ "quick-protobuf-codec 0.3.1 (git+https://github.com/tari-project/rust-libp2p.git?rev=3d918ccbf5ae1cbec0815a2156079b0fba4ba558)", "rand", "rand_core", - "thiserror", + "thiserror 1.0.68", "tracing", "void", "web-time", @@ -4872,7 +4933,7 @@ dependencies = [ "rand", "rw-stream-sink", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "tracing", "unsigned-varint 0.8.0", "void", @@ -4895,7 +4956,7 @@ dependencies = [ "lru", "quick-protobuf", "quick-protobuf-codec 0.3.1 (git+https://github.com/tari-project/rust-libp2p.git?rev=3d918ccbf5ae1cbec0815a2156079b0fba4ba558)", - "thiserror", + "thiserror 1.0.68", "tracing", "void", "web-time", @@ -4963,7 +5024,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec 0.3.1 (git+https://github.com/tari-project/rust-libp2p.git?rev=3d918ccbf5ae1cbec0815a2156079b0fba4ba558)", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "tracing", "void", ] @@ -4982,7 +5043,7 @@ dependencies = [ "serde", "sha2", "tari_crypto", - "thiserror", + "thiserror 1.0.68", "tracing", "zeroize", ] @@ -5057,7 +5118,7 @@ dependencies = [ "sha2", "snow", "static_assertions", - "thiserror", + "thiserror 1.0.68", "tracing", "x25519-dalek", "zeroize", @@ -5076,7 +5137,7 @@ dependencies = [ "pb-rs", "quick-protobuf", "quick-protobuf-codec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "thiserror", + "thiserror 1.0.68", "tracing", ] @@ -5115,7 +5176,7 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.16", "socket2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -5138,7 +5199,7 @@ dependencies = [ "quick-protobuf-codec 0.3.1 (git+https://github.com/tari-project/rust-libp2p.git?rev=3d918ccbf5ae1cbec0815a2156079b0fba4ba558)", "rand", "static_assertions", - "thiserror", + "thiserror 1.0.68", "tracing", "void", "web-time", @@ -5235,7 +5296,7 @@ dependencies = [ "ring 0.17.8", "rustls 0.23.16", "rustls-webpki 0.101.7", - "thiserror", + "thiserror 1.0.68", "x509-parser 0.16.0", "yasna", ] @@ -5263,7 +5324,7 @@ dependencies = [ "either", "futures 0.3.31", "libp2p-core", - "thiserror", + "thiserror 1.0.68", "tracing", "yamux 0.12.1", "yamux 0.13.3", @@ -5277,6 +5338,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", + "redox_syscall 0.5.7", ] [[package]] @@ -5489,7 +5551,7 @@ dependencies = [ "serde-value", "serde_json", "serde_yaml", - "thiserror", + "thiserror 1.0.68", "thread-id", "typemap-ors", "winapi", @@ -5624,7 +5686,7 @@ checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" dependencies = [ "miette-derive", "once_cell", - "thiserror", + "thiserror 1.0.68", "unicode-width", ] @@ -5709,7 +5771,7 @@ dependencies = [ [[package]] name = "minotari_app_grpc" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "argon2", "base64 
0.13.1", @@ -5728,8 +5790,9 @@ dependencies = [ "tari_features", "tari_max_size", "tari_script", + "tari_sidechain", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tonic 0.12.3", "tonic-build", @@ -5739,7 +5802,7 @@ dependencies = [ [[package]] name = "minotari_app_utilities" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "clap 3.2.25", "dialoguer 0.10.4", @@ -5753,7 +5816,7 @@ dependencies = [ "tari_comms", "tari_features", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tonic 0.12.3", ] @@ -5761,7 +5824,7 @@ dependencies = [ [[package]] name = "minotari_console_wallet" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "blake2", "chrono", @@ -5804,7 +5867,7 @@ dependencies = [ "tari_script", "tari_shutdown", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tonic 0.12.3", "tui", @@ -5818,7 +5881,7 @@ dependencies = [ [[package]] name = "minotari_ledger_wallet_common" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "bs58 0.5.1", ] @@ -5826,7 +5889,7 @@ dependencies = [ [[package]] name = "minotari_ledger_wallet_comms" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "borsh", "dialoguer 0.11.0", @@ -5843,13 +5906,13 @@ dependencies = [ "tari_crypto", "tari_script", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "minotari_node" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "async-trait", @@ -5889,7 +5952,7 @@ dependencies = [ "tari_shutdown", "tari_storage", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tonic 0.12.3", ] @@ -5897,7 +5960,7 @@ dependencies = [ [[package]] name = "minotari_node_grpc_client" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "minotari_app_grpc", ] @@ -5905,7 +5968,7 @@ dependencies = [ [[package]] name = "minotari_wallet" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "argon2", "async-trait", @@ -5944,9 +6007,10 @@ dependencies = [ "tari_script", "tari_service_framework", 
"tari_shutdown", + "tari_sidechain", "tari_utilities", "tempfile", - "thiserror", + "thiserror 1.0.68", "tokio", "tower 0.4.13", "zeroize", @@ -5955,11 +6019,11 @@ dependencies = [ [[package]] name = "minotari_wallet_grpc_client" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "minotari_app_grpc", "tari_common_types", - "thiserror", + "thiserror 1.0.68", "tonic 0.12.3", ] @@ -5996,6 +6060,7 @@ checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", + "log", "wasi", "windows-sys 0.52.0", ] @@ -6022,7 +6087,7 @@ dependencies = [ "hex-literal 0.4.1", "sealed", "serde", - "thiserror", + "thiserror 1.0.68", "tiny-keccak", ] @@ -6209,7 +6274,7 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -6223,7 +6288,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.68", "tokio", ] @@ -6343,6 +6408,34 @@ dependencies = [ "nom", ] +[[package]] +name = "notify" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533b4c39709f9ba5005d8002048266593c1cfaf3c5f0739d5b8ab0c6c504009" +dependencies = [ + "bitflags 2.6.0", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio 1.0.2", + "notify-types", + "walkdir", + "windows-sys 0.52.0", +] + +[[package]] +name = "notify-types" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7393c226621f817964ffb3dc5704f9509e107a8b024b489cc2c1b217378785df" +dependencies = [ + "instant", +] + [[package]] name = "ntapi" version = "0.3.7" @@ -6793,7 +6886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.68", "ucd-trie", ] @@ -6887,7 +6980,7 @@ dependencies = [ "sha3", "signature", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "twofish", "x25519-dalek", "zeroize", @@ -7126,7 +7219,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "thiserror", + "thiserror 1.0.68", "toml 0.5.11", ] @@ -7184,7 +7277,7 @@ dependencies = [ "memchr", "parking_lot 0.12.3", "protobuf", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -7280,7 +7373,7 @@ checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes 1.8.0", "heck 0.5.0", - "itertools 0.11.0", + "itertools 0.12.1", "log", "multimap 0.10.0", "once_cell", @@ -7301,7 +7394,7 @@ checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes 1.8.0", "heck 0.5.0", - "itertools 0.11.0", + "itertools 0.13.0", "log", "multimap 0.10.0", "once_cell", @@ -7347,7 +7440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.87", @@ -7360,7 +7453,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.87", @@ -7488,7 +7581,7 @@ dependencies = [ "asynchronous-codec", "bytes 1.8.0", "quick-protobuf", - "thiserror", + "thiserror 1.0.68", "unsigned-varint 0.8.0", ] @@ -7500,7 +7593,7 @@ dependencies = [ "asynchronous-codec", "bytes 1.8.0", "quick-protobuf", - "thiserror", + "thiserror 1.0.68", "unsigned-varint 0.8.0", ] @@ -7527,7 +7620,7 @@ dependencies = [ "rustc-hash", "rustls 0.23.16", "socket2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -7544,7 +7637,7 @@ dependencies = [ "rustc-hash", "rustls 0.23.16", "slab", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tracing", ] @@ -7637,7 +7730,7 @@ checksum = "9abb8f2aa3432700c2b64a67406ac0da4956d78991f50559509cecc2b6abf249" dependencies = [ "bitflags 1.3.2", "libc", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -7711,7 +7804,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -7991,7 +8084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33648a781874466a62d89e265fee9f17e32bc7d05a256e6cca41bf97eadcd8aa" dependencies = [ "bytes 1.8.0", - "thiserror", + "thiserror 1.0.68", "webrtc-util", ] @@ -8006,7 +8099,7 @@ dependencies = [ "netlink-packet-route", "netlink-proto", "nix 0.24.3", - "thiserror", + "thiserror 1.0.68", "tokio", ] @@ -8019,7 +8112,7 @@ dependencies = [ "bytes 1.8.0", "rand", "serde", - "thiserror", + "thiserror 1.0.68", "webrtc-util", ] @@ -8032,7 +8125,7 @@ dependencies = [ "bytes 1.8.0", "rand", "serde", - "thiserror", + "thiserror 1.0.68", "webrtc-util", ] @@ -8287,7 +8380,7 @@ checksum = "13254db766b17451aced321e7397ebf0a446ef0c8d2942b6e67a95815421093f" dependencies = [ "rand", "substring", - "thiserror", + "thiserror 1.0.68", "url", ] @@ -8468,15 +8561,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.6.0", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -8484,9 +8579,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling 0.20.10", "proc-macro2", @@ -8648,7 +8743,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -8843,7 +8938,7 @@ dependencies = [ "serde", "sha-1", "sha2", - "thiserror", + "thiserror 1.0.68", "xxhash-rust", ] @@ -8994,7 +9089,7 @@ dependencies = [ "rand", "ring 0.17.8", "subtle", - "thiserror", + "thiserror 1.0.68", "tokio", "url", "webrtc-util", @@ -9173,7 +9268,7 @@ dependencies = [ "tari_core", "tari_dan_common_types", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tonic 0.12.3", "ts-rs", "url", @@ -9213,7 +9308,7 @@ dependencies = [ 
[[package]] name = "tari_common" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "cargo_toml", @@ -9232,28 +9327,28 @@ dependencies = [ "structopt", "tari_features", "tempfile", - "thiserror", + "thiserror 1.0.68", "toml 0.5.11", ] [[package]] name = "tari_common_sqlite" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "diesel", "diesel_migrations", "log", "serde", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "tari_common_types" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "base64 0.21.7", "bitflags 2.6.0", @@ -9273,13 +9368,13 @@ dependencies = [ "tari_common", "tari_crypto", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "tari_comms" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "async-trait", @@ -9310,7 +9405,7 @@ dependencies = [ "tari_shutdown", "tari_storage", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -9323,7 +9418,7 @@ dependencies = [ [[package]] name = "tari_comms_dht" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "bitflags 2.6.0", @@ -9349,7 +9444,7 @@ dependencies = [ "tari_shutdown", "tari_storage", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tower 0.4.13", "zeroize", @@ -9358,7 +9453,7 @@ dependencies = [ [[package]] name = "tari_comms_rpc_macros" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "proc-macro2", "quote", @@ -9374,6 +9469,7 @@ dependencies = [ "indexmap 2.6.0", "log", "serde", + "serde_json", "tari_common", "tari_common_types", "tari_crypto", @@ -9382,16 +9478,17 @@ dependencies = [ "tari_engine_types", "tari_epoch_manager", "tari_shutdown", + "tari_sidechain", "tari_state_tree", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "tari_contacts" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "chrono", "diesel", @@ -9415,7 +9512,7 @@ dependencies = [ 
"tari_service_framework", "tari_shutdown", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tower 0.4.13", "uuid 1.11.0", @@ -9424,7 +9521,7 @@ dependencies = [ [[package]] name = "tari_core" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "async-trait", "bincode 1.3.3", @@ -9480,10 +9577,11 @@ dependencies = [ "tari_script", "tari_service_framework", "tari_shutdown", + "tari_sidechain", "tari_storage", "tari_test_utils", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", "zeroize", @@ -9529,7 +9627,6 @@ dependencies = [ "libp2p-identity", "log", "mini-moka", - "minotari_app_grpc", "multiaddr 0.18.1", "prost 0.12.6", "rand", @@ -9559,7 +9656,7 @@ dependencies = [ "tari_transaction", "tari_validator_node_client", "tari_validator_node_rpc", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-stream", ] @@ -9568,7 +9665,7 @@ dependencies = [ name = "tari_dan_common_types" version = "0.7.0" dependencies = [ - "blake2", + "borsh", "ethnum", "indexmap 2.6.0", "libp2p-identity", @@ -9585,7 +9682,7 @@ dependencies = [ "tari_hashing", "tari_mmr", "tari_template_lib", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -9618,7 +9715,7 @@ dependencies = [ "tari_transaction_manifest", "tari_utilities", "tempfile", - "thiserror", + "thiserror 1.0.68", "wasmer", "wasmer-middlewares", ] @@ -9648,6 +9745,7 @@ name = "tari_dan_storage" version = "0.7.0" dependencies = [ "anyhow", + "borsh", "chrono", "indexmap 2.6.0", "log", @@ -9657,13 +9755,15 @@ dependencies = [ "strum_macros 0.26.4", "tari_common", "tari_common_types", - "tari_core", "tari_crypto", "tari_dan_common_types", "tari_engine_types", + "tari_hashing", + "tari_sidechain", "tari_state_tree", + "tari_template_lib", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "time", "ts-rs", ] @@ -9675,6 +9775,7 @@ dependencies = [ "chrono", "diesel", "diesel_migrations", + "log", "rand", "serde", "serde_json", @@ -9683,7 +9784,7 @@ dependencies = [ "tari_dan_common_types", "tari_dan_storage", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -9708,7 +9809,7 @@ dependencies = [ "tari_transaction_manifest", "tari_utilities", "tari_wallet_daemon_client", - "thiserror", + "thiserror 1.0.68", "time", "tokio", "url", @@ -9730,7 +9831,7 @@ dependencies = [ "tari_template_lib", "tari_template_test_tooling", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "zeroize", ] @@ -9775,7 +9876,7 @@ dependencies = [ "tari_transaction", "tari_utilities", "tari_wallet_daemon_client", - "thiserror", + "thiserror 1.0.68", "tokio", "tower-http", "url", @@ -9809,7 +9910,7 @@ dependencies = [ "tari_template_lib", "tari_transaction", "tempfile", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -9857,7 +9958,7 @@ dependencies = [ "tari_template_abi", "tari_template_lib", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -9868,6 +9969,7 @@ dependencies = [ "anyhow", "async-trait", "log", + "serde", "tari_base_node_client", "tari_common_types", "tari_core", @@ -9875,15 +9977,16 @@ dependencies = [ "tari_dan_storage", "tari_dan_storage_sqlite", "tari_shutdown", + "tari_sidechain", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "tari_features" version = "1.7.0-pre.3" -source = 
"git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" [[package]] name = "tari_generate" @@ -9905,8 +10008,9 @@ dependencies = [ [[package]] name = "tari_hashing" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ + "blake2", "borsh", "digest", "tari_crypto", @@ -9958,7 +10062,7 @@ dependencies = [ "tari_template_lib", "tari_transaction", "tari_validator_node_rpc", - "thiserror", + "thiserror 1.0.68", "tokio", "tower-http", "url", @@ -9981,7 +10085,7 @@ dependencies = [ "tari_engine_types", "tari_template_abi", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -10001,14 +10105,14 @@ dependencies = [ "tari_template_lib", "tari_transaction", "tari_validator_node_rpc", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "tari_key_manager" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "argon2", "async-trait", @@ -10033,7 +10137,7 @@ dependencies = [ "tari_crypto", "tari_service_framework", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "zeroize", ] @@ -10041,7 +10145,7 @@ dependencies = [ [[package]] name = "tari_libtor" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "derivative", "libtor", @@ -10056,28 +10160,28 @@ dependencies = [ [[package]] name = "tari_max_size" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "borsh", "serde", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "tari_metrics" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "once_cell", "prometheus", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "tari_mmr" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "borsh", "digest", @@ -10085,7 +10189,7 @@ dependencies = [ "serde", "tari_crypto", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -10100,14 +10204,14 @@ dependencies = [ "tari_rpc_framework", "tari_shutdown", "tari_swarm", - "thiserror", + "thiserror 1.0.68", "tokio", ] [[package]] name = "tari_p2p" version = "1.7.0-pre.3" -source = 
"git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "fs2", @@ -10131,7 +10235,7 @@ dependencies = [ "tari_shutdown", "tari_storage", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-stream", "tower 0.4.13", @@ -10156,7 +10260,7 @@ dependencies = [ "proto_builder", "tari_metrics", "tari_shutdown", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-util 0.7.12", "tower 0.4.13", @@ -10191,7 +10295,7 @@ dependencies = [ "tari_state_tree", "tari_transaction", "tari_validator_node_rpc", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -10211,7 +10315,7 @@ dependencies = [ [[package]] name = "tari_script" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "blake2", "borsh", @@ -10223,20 +10327,20 @@ dependencies = [ "tari_crypto", "tari_max_size", "tari_utilities", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "tari_service_framework" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "anyhow", "async-trait", "futures 0.3.31", "log", "tari_shutdown", - "thiserror", + "thiserror 1.0.68", "tokio", "tower-service", ] @@ -10244,11 +10348,26 @@ dependencies = [ [[package]] name = "tari_shutdown" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "futures 0.3.31", ] +[[package]] +name = "tari_sidechain" +version = "1.7.0-pre.3" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" +dependencies = [ + "borsh", + "log", + "serde", + "tari_common_types", + "tari_crypto", + "tari_hashing", + "tari_utilities", + "thiserror 2.0.3", +] + [[package]] name = "tari_signaling_server" version = "0.7.0" @@ -10293,7 +10412,7 @@ dependencies = [ "tari_state_tree", "tari_transaction", "tari_utilities", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -10301,29 +10420,30 @@ dependencies = [ name = "tari_state_tree" version = "0.7.0" dependencies = [ - "hex", + "blake2", "indexmap 2.6.0", "itertools 0.11.0", "log", "serde", + "tari_bor", "tari_common_types", "tari_crypto", "tari_dan_common_types", "tari_engine_types", "tari_template_lib", - "thiserror", + "thiserror 1.0.68", ] [[package]] name = "tari_storage" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "bincode 1.3.3", "lmdb-zero", "log", "serde", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -10334,7 +10454,7 @@ dependencies = [ "libp2p-messaging", "libp2p-peersync", "libp2p-substream", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -10371,7 +10491,7 @@ 
dependencies = [ "tari_shutdown", "tari_validator_node_client", "tari_wallet_daemon_client", - "thiserror", + "thiserror 1.0.68", "tokio", "toml 0.8.19", "tonic 0.12.3", @@ -10446,7 +10566,7 @@ dependencies = [ [[package]] name = "tari_test_utils" version = "1.7.0-pre.3" -source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#9005c119dd090ee95d294755b0756abcffffdd43" +source = "git+https://github.com/tari-project/tari.git?branch=feature-dan2#817bacb3f53b7f61f252071e6b8f3363428c4252" dependencies = [ "futures 0.3.31", "rand", @@ -10460,6 +10580,7 @@ dependencies = [ name = "tari_transaction" version = "0.7.0" dependencies = [ + "borsh", "indexmap 2.6.0", "rand", "serde", @@ -10481,7 +10602,7 @@ dependencies = [ "tari_engine_types", "tari_template_builtin", "tari_template_lib", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -10508,7 +10629,6 @@ name = "tari_validator_node" version = "0.7.0" dependencies = [ "anyhow", - "async-trait", "axum 0.6.20", "axum-jrpc", "clap 3.2.25", @@ -10556,7 +10676,7 @@ dependencies = [ "tari_transaction", "tari_validator_node_client", "tari_validator_node_rpc", - "thiserror", + "thiserror 1.0.68", "time", "tokio", "tower-http", @@ -10586,7 +10706,7 @@ dependencies = [ "tari_transaction", "tari_transaction_manifest", "tari_validator_node_client", - "thiserror", + "thiserror 1.0.68", "time", "tokio", ] @@ -10605,7 +10725,7 @@ dependencies = [ "tari_dan_storage", "tari_engine_types", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -10627,7 +10747,7 @@ dependencies = [ "tari_rpc_framework", "tari_rpc_macros", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "tokio", ] @@ -10646,7 +10766,7 @@ dependencies = [ "tari_template_abi", "tari_template_lib", "tari_transaction", - "thiserror", + "thiserror 1.0.68", "ts-rs", ] @@ -10670,7 +10790,9 @@ dependencies = [ "tari_common_types", "tari_core", "tari_crypto", + "tari_dan_common_types", "tari_shutdown", + "tari_sidechain", "tokio", "toml 0.8.19", "tonic 0.12.3", @@ -10768,7 +10890,16 @@ version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.68", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -10782,6 +10913,17 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "thiserror-impl-no-std" version = "2.0.2" @@ -11139,7 +11281,7 @@ dependencies = [ "hex-literal 0.3.4", "rand", "sha1 0.6.0", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -11327,7 +11469,7 @@ checksum = "fc2cae1fc5d05d47aa24b64f9a4f7cba24cdc9187a2084dd97ac57bef5eccae6" dependencies = [ "chrono", "indexmap 2.6.0", - "thiserror", + "thiserror 1.0.68", "ts-rs-macros", ] @@ -11371,7 +11513,7 @@ dependencies = [ "log", "rand", "sha1 0.10.6", - "thiserror", + "thiserror 1.0.68", "url", "utf-8", ] @@ -11390,7 +11532,7 @@ dependencies = [ "rand", "ring 0.17.8", "stun", - "thiserror", + "thiserror 1.0.68", "tokio", "tokio-util 0.7.12", "webrtc-util", @@ -11813,7 
+11955,7 @@ dependencies = [ "serde-wasm-bindgen", "shared-buffer", "target-lexicon", - "thiserror", + "thiserror 1.0.68", "tracing", "wasm-bindgen", "wasmer-compiler", @@ -11846,7 +11988,7 @@ dependencies = [ "self_cell", "shared-buffer", "smallvec 1.13.2", - "thiserror", + "thiserror 1.0.68", "wasmer-types", "wasmer-vm", "wasmparser", @@ -11912,7 +12054,7 @@ dependencies = [ "rkyv", "sha2", "target-lexicon", - "thiserror", + "thiserror 1.0.68", "xxhash-rust", ] @@ -11939,7 +12081,7 @@ dependencies = [ "more-asserts", "region", "scopeguard", - "thiserror", + "thiserror 1.0.68", "wasmer-types", "windows-sys 0.59.0", ] @@ -12038,7 +12180,7 @@ dependencies = [ "sha2", "smol_str", "stun", - "thiserror", + "thiserror 1.0.68", "time", "tokio", "turn", @@ -12062,7 +12204,7 @@ checksum = "e8c08e648e10572b9edbe741074e0f4d3cb221aa7cdf9a814ee71606de312f33" dependencies = [ "bytes 1.8.0", "log", - "thiserror", + "thiserror 1.0.68", "tokio", "webrtc-sctp", "webrtc-util", @@ -12097,7 +12239,7 @@ dependencies = [ "sha1 0.10.6", "sha2", "subtle", - "thiserror", + "thiserror 1.0.68", "tokio", "webrtc-util", "x25519-dalek", @@ -12118,7 +12260,7 @@ dependencies = [ "serde", "serde_json", "stun", - "thiserror", + "thiserror 1.0.68", "tokio", "turn", "url", @@ -12136,7 +12278,7 @@ checksum = "ce981f93104a8debb3563bb0cedfe4aa2f351fdf6b53f346ab50009424125c08" dependencies = [ "log", "socket2", - "thiserror", + "thiserror 1.0.68", "tokio", "webrtc-util", ] @@ -12151,7 +12293,7 @@ dependencies = [ "bytes 1.8.0", "rand", "rtp 0.10.0", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -12166,7 +12308,7 @@ dependencies = [ "crc", "log", "rand", - "thiserror", + "thiserror 1.0.68", "tokio", "webrtc-util", ] @@ -12189,7 +12331,7 @@ dependencies = [ "rtp 0.9.0", "sha1 0.10.6", "subtle", - "thiserror", + "thiserror 1.0.68", "tokio", "webrtc-util", ] @@ -12209,7 +12351,7 @@ dependencies = [ "log", "nix 0.26.4", "rand", - "thiserror", + "thiserror 1.0.68", "tokio", "winapi", ] @@ -12603,7 +12745,7 @@ dependencies = [ "oid-registry 0.6.1", "ring 0.16.20", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time", ] @@ -12620,7 +12762,7 @@ dependencies = [ "nom", "oid-registry 0.7.1", "rusticata-macros", - "thiserror", + "thiserror 1.0.68", "time", ] diff --git a/Cargo.toml b/Cargo.toml index c85c86513..482506f89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -121,6 +121,7 @@ minotari_wallet_grpc_client = { git = "https://github.com/tari-project/tari.git" tari_common = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" } tari_common_types = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" } tari_hashing = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" } +tari_sidechain = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2" } # avoid including default features so each crate can choose which ones to import tari_core = { git = "https://github.com/tari-project/tari.git", branch = "feature-dan2", default-features = false } @@ -148,7 +149,7 @@ bigdecimal = "0.4.1" bincode = "2.0.0-rc.3" bitflags = "2.4" blake2 = "0.10.6" -borsh = "1.3" +borsh = "1.5" bytes = "1.5" cacache = "12.0.0" cargo_metadata = "0.15.3" @@ -163,7 +164,7 @@ convert_case = "0.6.0" cucumber = "0.21.0" d3ne = { git = "https://github.com/stringhandler/d3ne-rs.git", tag = "v0.8.0-pre.3" } dashmap = "5.5.0" -diesel = { version = "2.2.4", default-features = false } +diesel = { version = "2.2.6", default-features = false } diesel_migrations = "2.2.0" digest 
= "0.10" dirs = "4.0.0" @@ -181,7 +182,7 @@ httpmock = "0.6.8" humantime = "2.1.0" humantime-serde = "1.1.1" include_dir = "0.7.2" -indexmap = "2.5.0" +indexmap = "2.6.0" indoc = "1.0.6" itertools = "0.11.0" lazy_static = "1.4.0" @@ -216,7 +217,7 @@ reqwest = "0.11.16" semver = "1.0" serde = { version = "1.0", default-features = false } serde_json = "1.0" -serde_with = "2.3" +serde_with = "3.11.0" sha2 = "0.10.8" smallvec = "2.0.0-alpha.1" std-semaphore = "0.1.0" @@ -276,6 +277,7 @@ overflow-checks = true #tari_metrics = { git = "https://github.com/account/tari.git", branch = "my-branch" } #tari_libtor = { git = "https://github.com/account/tari.git", branch = "my-branch" } #tari_hashing = { git = "https://github.com/account/tari.git", branch = "my-branch" } +#tari_sidechain = { git = "https://github.com/account/tari.git", branch = "my-branch" } #[patch."https://github.com/tari-project/tari.git"] @@ -302,4 +304,4 @@ overflow-checks = true #tari_metrics = { path = "../tari/infrastructure/metrics" } #tari_libtor = { path = "../tari/infrastructure/libtor" } #tari_hashing = { path = "../tari/hashing" } - +#tari_sidechain = { path = "../tari/base_layer/sidechain" } diff --git a/applications/tari_dan_app_utilities/Cargo.toml b/applications/tari_dan_app_utilities/Cargo.toml index ea4f5a5cd..2f33963c3 100644 --- a/applications/tari_dan_app_utilities/Cargo.toml +++ b/applications/tari_dan_app_utilities/Cargo.toml @@ -29,7 +29,6 @@ tari_bor = { workspace = true, default-features = true } tari_indexer_lib = { workspace = true } tari_networking = { workspace = true } tari_validator_node_rpc = { workspace = true } -minotari_app_grpc = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } diff --git a/applications/tari_dan_app_utilities/src/base_layer_scanner.rs b/applications/tari_dan_app_utilities/src/base_layer_scanner.rs index 22d995a1f..0452c0309 100644 --- a/applications/tari_dan_app_utilities/src/base_layer_scanner.rs +++ b/applications/tari_dan_app_utilities/src/base_layer_scanner.rs @@ -23,7 +23,6 @@ use std::time::Duration; use log::*; -use minotari_app_grpc::tari_rpc::ValidatorNodeChangeState; use tari_base_node_client::{ grpc::GrpcBaseNodeClient, types::{BaseLayerMetadata, BlockInfo}, @@ -32,20 +31,29 @@ use tari_base_node_client::{ }; use tari_common_types::types::{Commitment, FixedHash, FixedHashSizeError, PublicKey}; use tari_consensus::consensus_constants::ConsensusConstants; -use tari_core::transactions::{ - tari_amount::MicroMinotari, - transaction_components::{ - CodeTemplateRegistration, - SideChainFeature, - TransactionOutput, - ValidatorNodeRegistration, +use tari_core::{ + base_node::comms_interface::ValidatorNodeChange, + transactions::{ + tari_amount::MicroMinotari, + transaction_components::{ + CodeTemplateRegistration, + SideChainFeatureData, + TransactionOutput, + ValidatorNodeRegistration, + }, }, }; use tari_crypto::{ ristretto::RistrettoPublicKey, - tari_utilities::{hex::Hex, ByteArray, ByteArrayError}, + tari_utilities::{ByteArray, ByteArrayError}, +}; +use tari_dan_common_types::{ + option::DisplayContainer, + optional::Optional, + Epoch, + NodeAddressable, + VersionedSubstateId, }; -use tari_dan_common_types::{optional::Optional, NodeAddressable, VersionedSubstateId}; use tari_dan_storage::{ consensus_models::{BurntUtxo, SubstateRecord}, global::{GlobalDb, MetadataKey}, @@ -53,10 +61,7 @@ use tari_dan_storage::{ StorageError, }; use tari_dan_storage_sqlite::{error::SqliteStorageError, global::SqliteGlobalDbAdapter}; -use 
tari_engine_types::{ - confidential::UnclaimedConfidentialOutput, - substate::{SubstateId, SubstateValue}, -}; +use tari_engine_types::{confidential::UnclaimedConfidentialOutput, substate::SubstateId}; use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerError, EpochManagerReader}; use tari_shutdown::ShutdownSignal; use tari_state_store_sqlite::SqliteStateStore; @@ -217,7 +222,7 @@ impl BaseLayerScanner { tip.height_of_longest_chain .saturating_sub(self.consensus_constants.base_layer_confirmations) ); - self.sync_blockchain().await?; + self.sync_blockchain(tip).await?; }, BlockchainProgression::Reorged => { error!( @@ -228,7 +233,7 @@ impl BaseLayerScanner { self.last_scanned_hash = None; self.last_scanned_validator_node_mr = None; self.last_scanned_height = 0; - self.sync_blockchain().await?; + self.sync_blockchain(tip).await?; }, BlockchainProgression::NoProgress => { trace!(target: LOG_TARGET, "No new blocks to scan."); @@ -249,6 +254,9 @@ impl BaseLayerScanner { &mut self, tip: &BaseLayerMetadata, ) -> Result { + if tip.height_of_longest_chain == 0 { + return Ok(BlockchainProgression::NoProgress); + } match self.last_scanned_tip { Some(hash) if hash == tip.tip_hash => Ok(BlockchainProgression::NoProgress), Some(hash) => { @@ -264,10 +272,9 @@ impl BaseLayerScanner { } #[allow(clippy::too_many_lines)] - async fn sync_blockchain(&mut self) -> Result<(), BaseLayerScannerError> { + async fn sync_blockchain(&mut self, tip: BaseLayerMetadata) -> Result<(), BaseLayerScannerError> { let start_scan_height = self.last_scanned_height; let mut current_hash = self.last_scanned_hash; - let tip = self.base_node_client.get_tip_info().await?; let end_height = match tip .height_of_longest_chain .checked_sub(self.consensus_constants.base_layer_confirmations) @@ -281,72 +288,14 @@ impl BaseLayerScanner { }, Some(end_height) => end_height, }; - let mut scan = tip.tip_hash; - let mut current_last_validator_nodes_mr = self.last_scanned_validator_node_mr; - loop { - let header = self.base_node_client.get_header_by_hash(scan).await?; - if let Some(last_tip) = self.last_scanned_tip { - if last_tip == scan { - // This was processed on the previous call to this function. - break; - } - } - if header.height == end_height { - // This will be processed down below. 
- break; - } - current_last_validator_nodes_mr = Some(header.validator_node_mr); - self.epoch_manager.add_block_hash(header.height, scan).await?; - scan = header.prev_hash; - } - // syncing validator node changes - if current_last_validator_nodes_mr != self.last_scanned_validator_node_mr { - info!(target: LOG_TARGET, - "⛓️ Syncing validator nodes (sidechain ID: {:?}) from base node (height range: {}-{})", - self.validator_node_sidechain_id, - start_scan_height, - end_height, - ); - - let node_changes = self - .base_node_client - .get_validator_node_changes(start_scan_height, end_height, self.validator_node_sidechain_id.as_ref()) - .await - .map_err(BaseLayerScannerError::BaseNodeError)?; - - for node_change in node_changes { - if node_change.registration.is_none() { - warn!( - target: LOG_TARGET, - "Can't register validator node \"{}\" because it has empty registration!", - node_change.public_key.to_hex(), - ); - continue; - } - let registration = ValidatorNodeRegistration::try_from(node_change.registration.clone().unwrap()) - .map_err(BaseLayerScannerError::GrpcConversion)?; - match node_change.state() { - ValidatorNodeChangeState::Add => { - self.add_validator_node_registration( - node_change.start_height, - registration, - node_change.minimum_value_promise.into(), - ) - .await?; - }, - ValidatorNodeChangeState::Remove => { - self.remove_validator_node_registration( - PublicKey::from_canonical_bytes(node_change.public_key.as_slice()) - .map_err(BaseLayerScannerError::PublicKeyConversion)?, - registration.sidechain_id().cloned(), - ) - .await?; - }, - } + // Recover the last scanned validator node MR if it is not set yet, i.e the node has scanned BL blocks + // previously. + if self.last_scanned_validator_node_mr.is_none() { + if let Some(hash) = self.last_scanned_hash { + let header = self.base_node_client.get_header_by_hash(hash).await?; + self.last_scanned_validator_node_mr = Some(header.validator_node_mr); } - - self.last_scanned_validator_node_mr = current_last_validator_nodes_mr; } for current_height in start_scan_height..=end_height { @@ -362,10 +311,11 @@ impl BaseLayerScanner { )) })?; let block_info = utxos.block_info; + // TODO: Because we don't know the next hash when we're done scanning to the tip, we need to load the // previous scanned block again to get it. This isn't ideal, but won't be an issue when we scan a few // blocks back. 
- if self.last_scanned_hash.map(|h| h == block_info.hash).unwrap_or(false) { + if self.last_scanned_hash.is_some_and(|h| h == block_info.hash) { if let Some(hash) = block_info.next_block_hash { current_hash = Some(hash); continue; } @@ -377,23 +327,39 @@ impl BaseLayerScanner { "⛓️ Scanning base layer block {} of {}", block_info.height, end_height ); + let header = self.base_node_client.get_header_by_hash(block_info.hash).await?; + let current_validator_node_mr = header.validator_node_mr; + self.epoch_manager + .add_block_hash(header.height, block_info.hash) + .await?; + for output in utxos.outputs { let output_hash = output.hash(); let Some(sidechain_feature) = output.features.sidechain_feature.as_ref() else { warn!(target: LOG_TARGET, "Base node returned invalid data: Sidechain utxo output must have sidechain features"); continue; }; - match sidechain_feature { - SideChainFeature::ValidatorNodeRegistration(reg) => { + match sidechain_feature.data() { + SideChainFeatureData::ValidatorNodeRegistration(reg) => { + if sidechain_feature.sidechain_public_key() != self.validator_node_sidechain_id.as_ref() { + debug!( + target: LOG_TARGET, + "Ignoring validator node registration for sidechain ID {:?}. Local node's sidechain ID: {:?}", + sidechain_feature.sidechain_public_key(), + self.validator_node_sidechain_id, + ); + continue; + } trace!(target: LOG_TARGET, "New validator node registration scanned: {reg:?}"); }, - SideChainFeature::CodeTemplateRegistration(reg) => { - if reg.sidechain_id != self.template_sidechain_id { - warn!( + SideChainFeatureData::CodeTemplateRegistration(reg) => { + if sidechain_feature.sidechain_public_key() != self.template_sidechain_id.as_ref() { + debug!( target: LOG_TARGET, - "Ignoring code template registration for sidechain ID {:?}. Expected sidechain ID: {:?}", - reg.sidechain_id.as_ref().map(|v| v.to_hex()), - self.template_sidechain_id.as_ref().map(|v| v.to_hex())); + "Ignoring code template registration for sidechain ID {:?}. Local node's sidechain ID: {:?}", + sidechain_feature.sidechain_public_key(), + self.template_sidechain_id, + ); continue; } self.register_code_template_registration( @@ -404,7 +370,7 @@ impl BaseLayerScanner { ) .await?; }, - SideChainFeature::ConfidentialOutput(data) => { + SideChainFeatureData::ConfidentialOutput(_) => { // Should be checked by the base layer if !output.is_burned() { warn!( @@ -415,12 +381,13 @@ impl BaseLayerScanner { ); continue; } - if data.sidechain_id.as_ref() != self.burnt_utxo_sidechain_id.as_ref() { - warn!( + if sidechain_feature.sidechain_public_key() != self.burnt_utxo_sidechain_id.as_ref() { + debug!( target: LOG_TARGET, - "Ignoring burnt UTXO for sidechain ID {:?}. Expected sidechain ID: {:?}", - data.sidechain_id.as_ref().map(|v| v.to_hex()), - self.burnt_utxo_sidechain_id.as_ref().map(|v| v.to_hex())); + "Ignoring burnt UTXO for sidechain ID {:?}. Local node's sidechain ID: {:?}", + sidechain_feature.sidechain_public_key(), + self.burnt_utxo_sidechain_id, + ); continue; } info!( @@ -431,10 +398,37 @@ impl BaseLayerScanner { ); self.register_burnt_utxo(output, &block_info).await?; }, + SideChainFeatureData::EvictionProof(proof) => { + if sidechain_feature.sidechain_public_key() != self.validator_node_sidechain_id.as_ref() { + debug!( + target: LOG_TARGET, + "Ignoring eviction for sidechain ID {:?}. 
Local node's sidechain ID: {:?}", + sidechain_feature.sidechain_public_key(), + self.validator_node_sidechain_id, + ); + continue; + } + trace!(target: LOG_TARGET, "Eviction proof scanned: {proof:?}"); + }, } } - // Once we have all the UTXO data, we "activate" the new epoch if applicable. + debug!( + target: LOG_TARGET, + "⛓️ last_scanned_validator_node_mr = {} current = {}", self.last_scanned_validator_node_mr.display(), current_validator_node_mr + ); + // if the validator node MR has changed, we need to update the active validator node set + if self + .last_scanned_validator_node_mr + .map_or(true, |last| last != current_validator_node_mr) + { + let constants = self.base_node_client.get_consensus_constants(block_info.height).await?; + let scanned_epoch = constants.height_to_epoch(block_info.height); + self.update_validators(scanned_epoch).await?; + self.last_scanned_validator_node_mr = Some(current_validator_node_mr); + } + + // Once we have all the UTXO and validator data, we "activate" the new epoch if applicable. self.epoch_manager .update_epoch(block_info.height, block_info.hash) .await?; @@ -466,16 +460,57 @@ impl BaseLayerScanner { Ok(()) } + async fn update_validators(&mut self, epoch: Epoch) -> Result<(), BaseLayerScannerError> { + info!( + target: LOG_TARGET, + "⛓️ Updating active validator node set (sidechain ID: {:?}) from base node for epoch {epoch}", + self.validator_node_sidechain_id, + ); + + let node_changes = self + .base_node_client + .get_validator_node_changes(epoch, self.validator_node_sidechain_id.as_ref()) + .await + .map_err(BaseLayerScannerError::BaseNodeError)?; + + info!( + target: LOG_TARGET, + "⛓️ {} validator node change(s) for epoch {}", node_changes.len(), epoch, + ); + + for node_change in node_changes { + match node_change { + ValidatorNodeChange::Add { + registration, + activation_epoch, + minimum_value_promise, + } => { + self.add_validator_node_registration( + Epoch(activation_epoch.as_u64()), + registration, + minimum_value_promise, + ) + .await?; + }, + ValidatorNodeChange::Remove { public_key } => { + self.remove_validator_node_registration(public_key, epoch).await?; + }, + } + } + + Ok(()) + } + async fn register_burnt_utxo( &mut self, output: TransactionOutput, block_info: &BlockInfo, ) -> Result<(), BaseLayerScannerError> { - let substate_id = SubstateId::UnclaimedConfidentialOutput( - UnclaimedConfidentialOutputAddress::try_from_commitment(output.commitment.as_bytes()).map_err(|e| - // Technically impossible, but anyway - BaseLayerScannerError::InvalidSideChainUtxoResponse(format!("Invalid commitment: {}", e)))?, - ); + let commitment_address = UnclaimedConfidentialOutputAddress::try_from_commitment(output.commitment.as_bytes()) + .map_err(|e| + // Technically impossible, but anyway + BaseLayerScannerError::InvalidSideChainUtxoResponse(format!("Invalid commitment: {}", e)))?; + let substate_id = SubstateId::UnclaimedConfidentialOutput(commitment_address); let consensus_constants = self.epoch_manager.get_base_layer_consensus_constants().await?; let epoch = consensus_constants.height_to_epoch(block_info.height); let Some(local_committee_info) = self.epoch_manager.get_local_committee_info(epoch).await.optional()? 
else { @@ -501,10 +536,10 @@ impl BaseLayerScanner { )) })?; - let substate = SubstateValue::UnclaimedConfidentialOutput(UnclaimedConfidentialOutput { + let substate = UnclaimedConfidentialOutput { commitment: output.commitment.clone(), encrypted_data, - }); + }; info!( target: LOG_TARGET, @@ -522,10 +557,10 @@ impl BaseLayerScanner { return Ok(()); } - BurntUtxo::new(substate_id, substate, block_info.height).insert(tx) + BurntUtxo::new(commitment_address, substate, block_info.height).insert(tx) }) .map_err(|source| BaseLayerScannerError::CouldNotRegisterBurntUtxo { - commitment: Box::new(output.commitment.clone()), + commitment: Box::new(output.commitment), source, })?; @@ -534,19 +569,19 @@ impl BaseLayerScanner { async fn add_validator_node_registration( &mut self, - height: u64, + activation_epoch: Epoch, registration: ValidatorNodeRegistration, minimum_value_promise: MicroMinotari, ) -> Result<(), BaseLayerScannerError> { info!( target: LOG_TARGET, - "⛓️ Validator node registration UTXO for {} found at height {}", + "⛓️ Validator node {} activated at {}", registration.public_key(), - height, + activation_epoch, ); self.epoch_manager - .add_validator_node_registration(height, registration, minimum_value_promise) + .add_validator_node_registration(activation_epoch, registration, minimum_value_promise) .await?; Ok(()) @@ -555,17 +590,16 @@ impl BaseLayerScanner { async fn remove_validator_node_registration( &mut self, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), BaseLayerScannerError> { info!( target: LOG_TARGET, - "⛓️ Remove validator node registration for {}(side chain ID: {:?})", + "⛓️ Deactivating validator node registration for {}", public_key, - sidechain_id ); self.epoch_manager - .remove_validator_node_registration(public_key, sidechain_id) + .deactivate_validator_node(public_key, deactivation_epoch) .await?; Ok(()) diff --git a/applications/tari_dan_app_utilities/src/common.rs b/applications/tari_dan_app_utilities/src/common.rs new file mode 100644 index 000000000..7f99f016c --- /dev/null +++ b/applications/tari_dan_app_utilities/src/common.rs @@ -0,0 +1,26 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use anyhow::{bail, Context}; +use tari_base_node_client::BaseNodeClient; +use tari_common::configuration::Network; + +pub async fn verify_correct_network( + base_node_client: &mut TClient, + configured_network: Network, +) -> anyhow::Result<()> { + let base_node_network_byte = base_node_client.get_network().await?; + + let base_node_network = + Network::try_from(base_node_network_byte).context("base node returned an invalid network byte")?; + + if configured_network != base_node_network { + bail!( + "Base node network is not the same as the configured network. Base node network: {}, Configured network: \ + {}.", + base_node_network, + configured_network, + ); + } + Ok(()) +} diff --git a/applications/tari_dan_app_utilities/src/lib.rs b/applications/tari_dan_app_utilities/src/lib.rs index c25b26074..2fef04f92 100644 --- a/applications/tari_dan_app_utilities/src/lib.rs +++ b/applications/tari_dan_app_utilities/src/lib.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pub mod base_layer_scanner; +pub mod common; pub mod configuration; pub mod json_encoding; pub mod keypair; diff --git a/applications/tari_indexer/src/bootstrap.rs b/applications/tari_indexer/src/bootstrap.rs index 5393d8661..e0458f766 100644 --- a/applications/tari_indexer/src/bootstrap.rs +++ b/applications/tari_indexer/src/bootstrap.rs @@ -20,11 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{fs, io, str::FromStr}; +use std::{convert::Infallible, fs, io, str::FromStr}; use anyhow::Context; use libp2p::identity; use minotari_app_utilities::identity_management; +use serde::Serialize; use tari_base_node_client::grpc::GrpcBaseNodeClient; use tari_common::{ configuration::bootstrap::{grpc_default_port, ApplicationType}, @@ -34,15 +35,19 @@ use tari_consensus::consensus_constants::ConsensusConstants; use tari_crypto::tari_utilities::ByteArray; use tari_dan_app_utilities::{ base_layer_scanner, + common::verify_correct_network, keypair::RistrettoKeypair, seed_peer::SeedPeer, template_manager::{self, implementation::TemplateManager}, }; -use tari_dan_common_types::PeerAddress; +use tari_dan_common_types::{layer_one_transaction::LayerOneTransactionDef, PeerAddress}; use tari_dan_p2p::TariMessagingSpec; use tari_dan_storage::global::GlobalDb; use tari_dan_storage_sqlite::global::SqliteGlobalDbAdapter; -use tari_epoch_manager::base_layer::{EpochManagerConfig, EpochManagerHandle}; +use tari_epoch_manager::{ + base_layer::{EpochManagerConfig, EpochManagerHandle}, + traits::LayerOneTransactionSubmitter, +}; use tari_networking::{MessagingMode, NetworkingHandle, RelayCircuitLimits, RelayReservationLimits, SwarmConfig}; use tari_shutdown::ShutdownSignal; use tari_state_store_sqlite::SqliteStateStore; @@ -62,12 +67,15 @@ pub async fn spawn_services( ensure_directories_exist(config)?; // GRPC client connection to base node - let base_node_client = GrpcBaseNodeClient::new(config.indexer.base_node_grpc_url.clone().unwrap_or_else(|| { - let port = grpc_default_port(ApplicationType::BaseNode, config.network); - format!("http://127.0.0.1:{port}") - .parse() - .expect("Default base node GRPC URL is malformed") - })); + let mut base_node_client = + GrpcBaseNodeClient::new(config.indexer.base_node_grpc_url.clone().unwrap_or_else(|| { + let port = grpc_default_port(ApplicationType::BaseNode, config.network); + format!("http://127.0.0.1:{port}") + .parse() + .expect("Default base node GRPC URL is malformed") + })); + + verify_correct_network(&mut base_node_client, config.network).await?; // Initialize networking let identity = identity::Keypair::sr25519_from_bytes(keypair.secret_key().as_bytes().to_vec()).map_err(|e| { @@ -129,6 +137,7 @@ pub async fn spawn_services( global_db.clone(), base_node_client.clone(), keypair.public_key().clone(), + NoopL1Submitter, shutdown.clone(), ); @@ -190,3 +199,16 @@ fn save_identities(config: &ApplicationConfig, identity: &RistrettoKeypair) -> R Ok(()) } + +struct NoopL1Submitter; + +impl LayerOneTransactionSubmitter for NoopL1Submitter { + type Error = Infallible; + + async fn submit_transaction( + &self, + _proof: LayerOneTransactionDef, + ) -> Result<(), Self::Error> { + Ok(()) + } +} diff --git a/applications/tari_swarm_daemon/src/process_manager/handle.rs b/applications/tari_swarm_daemon/src/process_manager/handle.rs index df6689436..11d9488c6 100644 --- 
a/applications/tari_swarm_daemon/src/process_manager/handle.rs +++ b/applications/tari_swarm_daemon/src/process_manager/handle.rs @@ -146,6 +146,20 @@ impl ProcessManagerHandle { Ok(intances.into_iter().find(|i| i.name == name)) } + pub async fn get_instance(&self, id: InstanceId) -> anyhow::Result> { + let (tx_reply, rx_reply) = oneshot::channel(); + // TODO: consider optimizing this by adding a new request variant + self.tx_request + .send(ProcessManagerRequest::ListInstances { + by_type: None, + reply: tx_reply, + }) + .await?; + + let intances = rx_reply.await??; + Ok(intances.into_iter().find(|i| i.id == id)) + } + // pub async fn list_minotari_nodes(&self) -> anyhow::Result> { // self.list_instances(Some(InstanceType::MinoTariNode)).await // } diff --git a/applications/tari_swarm_daemon/src/process_manager/manager.rs b/applications/tari_swarm_daemon/src/process_manager/manager.rs index cd7218ae9..c0943e3a8 100644 --- a/applications/tari_swarm_daemon/src/process_manager/manager.rs +++ b/applications/tari_swarm_daemon/src/process_manager/manager.rs @@ -123,6 +123,7 @@ impl ProcessManager { } // "Mine in" the validators and templates + // 10 for new epoch + 10 for BL scan lag self.mine(20).await?; } diff --git a/applications/tari_swarm_daemon/src/webserver/rpc/instances.rs b/applications/tari_swarm_daemon/src/webserver/rpc/instances.rs index 521603049..d847ad37f 100644 --- a/applications/tari_swarm_daemon/src/webserver/rpc/instances.rs +++ b/applications/tari_swarm_daemon/src/webserver/rpc/instances.rs @@ -29,7 +29,11 @@ pub async fn start_all(context: &HandlerContext, req: StartAllRequest) -> Result Ok(StartAllResponse { num_instances }) } -pub type StartInstanceRequest = String; +#[derive(Debug, Clone, Deserialize)] +pub struct StartInstanceRequest { + pub by_name: Option, + pub by_id: Option, +} #[derive(Debug, Clone, Serialize)] pub struct StartInstanceResponse { @@ -40,26 +44,37 @@ pub async fn start( context: &HandlerContext, req: StartInstanceRequest, ) -> Result { - let name = req; - - let instance = context - .process_manager() - .get_instance_by_name(name) - .await? - .ok_or_else(|| { - JsonRpcError::new( - JsonRpcErrorReason::ApplicationError(404), - "Instance not found".to_string(), + let instance = match (req.by_name, req.by_id) { + (_, Some(id)) => context.process_manager().get_instance(id).await?, + (Some(name), None) => context.process_manager().get_instance_by_name(name).await?, + (None, None) => { + return Err(JsonRpcError::new( + JsonRpcErrorReason::InvalidParams, + "Either `by_name` or `by_id` must be provided".to_string(), serde_json::Value::Null, ) - })?; + .into()); + }, + }; + + let instance = instance.ok_or_else(|| { + JsonRpcError::new( + JsonRpcErrorReason::ApplicationError(404), + "Instance not found".to_string(), + serde_json::Value::Null, + ) + })?; context.process_manager().start_instance(instance.id).await?; Ok(StartInstanceResponse { success: true }) } -pub type StopInstanceRequest = String; +#[derive(Debug, Clone, Deserialize)] +pub struct StopInstanceRequest { + pub by_name: Option, + pub by_id: Option, +} #[derive(Debug, Clone, Serialize)] pub struct StopInstanceResponse { @@ -67,19 +82,26 @@ pub struct StopInstanceResponse { } pub async fn stop(context: &HandlerContext, req: StopInstanceRequest) -> Result { - let name = req; - - let instance = context - .process_manager() - .get_instance_by_name(name) - .await? 
- .ok_or_else(|| { - JsonRpcError::new( - JsonRpcErrorReason::ApplicationError(404), - "Instance not found".to_string(), + let instance = match (req.by_name, req.by_id) { + (_, Some(id)) => context.process_manager().get_instance(id).await?, + (Some(name), None) => context.process_manager().get_instance_by_name(name).await?, + (None, None) => { + return Err(JsonRpcError::new( + JsonRpcErrorReason::InvalidParams, + "Either `by_name` or `by_id` must be provided".to_string(), serde_json::Value::Null, ) - })?; + .into()); + }, + }; + + let instance = instance.ok_or_else(|| { + JsonRpcError::new( + JsonRpcErrorReason::ApplicationError(404), + "Instance not found".to_string(), + serde_json::Value::Null, + ) + })?; context.process_manager().stop_instance(instance.id).await?; diff --git a/applications/tari_swarm_daemon/src/webserver/server.rs b/applications/tari_swarm_daemon/src/webserver/server.rs index 8bc14afc9..e46f8535b 100644 --- a/applications/tari_swarm_daemon/src/webserver/server.rs +++ b/applications/tari_swarm_daemon/src/webserver/server.rs @@ -113,9 +113,9 @@ async fn json_rpc_handler(Extension(context): Extension>, va "add_asset_wallet" | "add_wallet_daemon" => call_handler(context, value, rpc::dan_wallets::create).await, "add_indexer" => call_handler(context, value, rpc::indexers::create).await, "add_validator_node" => call_handler(context, value, rpc::validator_nodes::create).await, - "start" => call_handler(context, value, rpc::instances::start).await, + "start_instance" => call_handler(context, value, rpc::instances::start).await, "start_all" => call_handler(context, value, rpc::instances::start_all).await, - "stop" => call_handler(context, value, rpc::instances::stop).await, + "stop_instance" => call_handler(context, value, rpc::instances::stop).await, "stop_all" => call_handler(context, value, rpc::instances::stop_all).await, "list_instances" => call_handler(context, value, rpc::instances::list).await, "delete_data" => call_handler(context, value, rpc::instances::delete_data).await, diff --git a/applications/tari_swarm_daemon/webui/src/components/MinotariNodes.tsx b/applications/tari_swarm_daemon/webui/src/components/MinotariNodes.tsx index b29b641fe..ff86fc06b 100644 --- a/applications/tari_swarm_daemon/webui/src/components/MinotariNodes.tsx +++ b/applications/tari_swarm_daemon/webui/src/components/MinotariNodes.tsx @@ -14,9 +14,11 @@ export default function MinotariNodes(props: Props) { const [isLoading, setIsLoading] = React.useState(true); + const reload = () => + jsonRpc("list_instances", { by_type: "MinoTariNode" }).then((nodes: any) => setNodes(nodes.instances)); + React.useEffect(() => { - jsonRpc("list_instances", { by_type: "MinoTariNode" }).then((nodes: any) => setNodes(nodes.instances)) - .then(() => setIsLoading(false)); + reload().then(() => setIsLoading(false)); }, []); if (isLoading) { @@ -26,7 +28,7 @@ export default function MinotariNodes(props: Props) { return (
{nodes!.map((node: any, i: number) => ( - + ))}
); @@ -34,15 +36,18 @@ export default function MinotariNodes(props: Props) { function Node(props: any) { const onStart = () => { - jsonRpc("start_instance", { instance_id: props.id }); + jsonRpc("start_instance", { by_id: props.id }) + .then(props.onReload); }; const onStop = () => { - jsonRpc("stop_instance", { instance_id: props.id }); + jsonRpc("stop_instance", { by_id: props.id }) + .then(props.onReload); }; const onDeleteData = () => { - jsonRpc("delete_instance_data", { instance_id: props.id }); + jsonRpc("delete_instance_data", { instance_id: props.id }) + .then(props.onReload); }; return ( diff --git a/applications/tari_swarm_daemon/webui/src/components/MinotariWallet.tsx b/applications/tari_swarm_daemon/webui/src/components/MinotariWallet.tsx index ed05761cc..49816f0ff 100644 --- a/applications/tari_swarm_daemon/webui/src/components/MinotariWallet.tsx +++ b/applications/tari_swarm_daemon/webui/src/components/MinotariWallet.tsx @@ -15,11 +15,14 @@ export default function MinotariWallet(props: Props) { const [danWallets, setDanWallets] = React.useState(null); const [isLoading, setIsLoading] = React.useState(true); - - React.useEffect(() => { + const reload = () => jsonRpc("list_instances", { by_type: "MinoTariConsoleWallet" }).then((wallets: any) => setWallets(wallets.instances)) .then(() => jsonRpc("list_instances", { by_type: "TariWalletDaemon" }).then((wallets: any) => setDanWallets(wallets.instances))) .then(() => setIsLoading(false)); + + + React.useEffect(() => { + reload(); }, []); if (isLoading) { @@ -29,7 +32,7 @@ export default function MinotariWallet(props: Props) { return (
{wallets!.map((wallet: any, i: number) => ( - + ))}
); @@ -37,15 +40,18 @@ export default function MinotariWallet(props: Props) { function Wallet(props: any) { const onStart = () => { - jsonRpc("start_instance", { instance_id: props.id }); + jsonRpc("start_instance", { by_id: props.id }) + .then(props.onReload); }; const onStop = () => { - jsonRpc("stop_instance", { instance_id: props.id }); + jsonRpc("stop_instance", { by_id: props.id }) + .then(props.onReload); }; const onDeleteData = () => { - jsonRpc("delete_instance_data", { instance_id: props.id }); + jsonRpc("delete_instance_data", { instance_id: props.id }) + .then(props.onReload); }; const wallet = props.danWallets[0]; diff --git a/applications/tari_swarm_daemon/webui/src/routes/Main.tsx b/applications/tari_swarm_daemon/webui/src/routes/Main.tsx index 0b4918ca1..77881dc11 100644 --- a/applications/tari_swarm_daemon/webui/src/routes/Main.tsx +++ b/applications/tari_swarm_daemon/webui/src/routes/Main.tsx @@ -49,6 +49,7 @@ function ExtraInfoVN({ name, url, addTxToPool, autoRefresh, state, horizontal }: horizontal: boolean }) { const [epochManagerStats, setEpochManagerStats] = useState(null); + const [consensusStatus, setConsensusStatus] = useState(null); const [pool, setPool] = useState([]); const [copied, setCopied] = useState(null); const [missingTxStates, setMissingTxStates] = useState({}); // {tx_id: [vn1, vn2, ...]} @@ -65,11 +66,15 @@ function ExtraInfoVN({ name, url, addTxToPool, autoRefresh, state, horizontal }: }, [tick, autoRefresh]); useEffect(() => { jsonRpc2(url, "get_epoch_manager_stats").then((resp) => { - // setRow(resp.committee_info.shard + 1); setEpochManagerStats(resp); }).catch((resp) => { console.error("err", resp); }); + jsonRpc2(url, "get_consensus_status").then((resp) => { + setConsensusStatus(resp); + }).catch((resp) => { + console.error("err", resp); + }); jsonRpc2(url, "get_tx_pool").then((resp) => { setPool(resp.tx_pool); addTxToPool(resp.tx_pool.filter((tx: any) => Boolean(tx?.transaction)).map((tx: any) => tx.transaction.id).sort()); @@ -194,11 +199,17 @@ function ExtraInfoVN({ name, url, addTxToPool, autoRefresh, state, horizontal }: const { committee_info: committeeInfo, - current_block_height: height, - current_epoch: epoch, + current_block_height: baseLayerheight, + current_epoch: baseLayerEpoch, start_epoch: startEpoch, } = epochManagerStats || {} as any; + const { + height: consensusHeight, + epoch: consensusEpoch, + state: consensusState, + } = consensusStatus || {} as any; + return (

@@ -209,13 +220,14 @@ function ExtraInfoVN({ name, url, addTxToPool, autoRefresh, state, horizontal }: gridTemplateRows: "auto auto auto auto auto", }}>
Shard Group
-
Height
-
Epoch
+
Base layer
+
Consensus
Public key
Peer id
{committeeInfo ? `${committeeInfo?.shard_group.start}-${committeeInfo?.shard_group.end_inclusive} (${committeeInfo?.num_shard_group_members} members)` : "--"}
-
{height}
-
{epoch}{startEpoch ? ` (since epoch ${startEpoch})` : " "}
+
Height: {baseLayerheight}, + Epoch: {baseLayerEpoch} {startEpoch ? ` (since epoch ${startEpoch})` : " "}
+
Height: {consensusHeight}, Epoch: {consensusEpoch}, Status: {consensusState}
{publicKey}
{peerId}
@@ -296,11 +308,11 @@ function ShowInfo(params: any) { }; const handleOnStart = () => { - jsonRpc("start", name).then(onReload); + jsonRpc("start_instance", { by_name: name }).then(onReload); }; const handleOnStop = () => { - jsonRpc("stop", name).then(onReload); + jsonRpc("stop_instance", { by_name: name }).then(onReload); }; const handleDeleteData = () => { diff --git a/applications/tari_validator_node/Cargo.toml b/applications/tari_validator_node/Cargo.toml index b5a7c4e55..dcede3d1f 100644 --- a/applications/tari_validator_node/Cargo.toml +++ b/applications/tari_validator_node/Cargo.toml @@ -48,7 +48,6 @@ sqlite_message_logger = { workspace = true } libp2p = { workspace = true } anyhow = { workspace = true } -async-trait = { workspace = true } axum = { workspace = true } axum-jrpc = { workspace = true, features = ["anyhow_error"] } clap = { workspace = true, features = ["env"] } @@ -79,6 +78,7 @@ tokio = { workspace = true, features = [ "time", "sync", "rt-multi-thread", + "fs" ] } tower-http = { workspace = true, features = ["default", "cors"] } url = { workspace = true, features = ["serde"] } diff --git a/applications/tari_validator_node/log4rs_sample.yml b/applications/tari_validator_node/log4rs_sample.yml index e17d54083..48e3395e9 100644 --- a/applications/tari_validator_node/log4rs_sample.yml +++ b/applications/tari_validator_node/log4rs_sample.yml @@ -149,7 +149,7 @@ loggers: additive: false tari::dan: - level: info + level: debug appenders: - dan_layer - stdout @@ -170,19 +170,6 @@ loggers: appenders: - engine - # Route log events sent to the "comms" logger to the "network" appender - comms: - level: debug - appenders: - - network - - # Route log events sent to the "yamux" logger to the "network" appender - yamux: - level: info - appenders: - - network - - # spits out the whole wasm if in debug mode regalloc: level: info diff --git a/applications/tari_validator_node/src/bootstrap.rs b/applications/tari_validator_node/src/bootstrap.rs index 6c0af34bf..2e4186823 100644 --- a/applications/tari_validator_node/src/bootstrap.rs +++ b/applications/tari_validator_node/src/bootstrap.rs @@ -34,7 +34,7 @@ use tari_common::{ configuration::Network, exit_codes::{ExitCode, ExitError}, }; -use tari_common_types::types::FixedHash; +use tari_common_types::{epoch::VnEpoch, types::FixedHash}; use tari_consensus::consensus_constants::ConsensusConstants; #[cfg(not(feature = "metrics"))] use tari_consensus::traits::hooks::NoopHooks; @@ -100,13 +100,17 @@ use tari_template_lib::{ }; use tari_transaction::Transaction; use tari_validator_node_rpc::client::TariValidatorNodeRpcClientFactory; -use tokio::{sync::mpsc, task::JoinHandle}; +use tokio::{ + sync::{broadcast, mpsc}, + task::JoinHandle, +}; #[cfg(feature = "metrics")] use crate::consensus::metrics::PrometheusConsensusMetrics; use crate::{ consensus::{self, ConsensusHandle, TariDanBlockTransactionExecutor}, dry_run_transaction_processor::DryRunTransactionProcessor, + file_l1_submitter::FileLayerOneSubmitter, p2p::{ create_tari_validator_node_rpc_service, services::{ @@ -229,11 +233,12 @@ pub async fn spawn_services( global_db.clone(), base_node_client.clone(), keypair.public_key().clone(), + FileLayerOneSubmitter::new(config.get_layer_one_transaction_base_path()), shutdown.clone(), ); // Create registration file - if let Err(err) = create_registration_file(config, &epoch_manager, &keypair).await { + if let Err(err) = create_registration_file(config, &epoch_manager, sidechain_id.as_ref(), &keypair).await { error!(target: LOG_TARGET, "Error creating 
registration file: {}", err); if epoch_manager_join_handle.is_finished() { return epoch_manager_join_handle @@ -262,9 +267,11 @@ pub async fn spawn_services( per_log_cost: 1, }; + let (tx_hotstuff_events, _) = broadcast::channel(100); // Consensus gossip let (consensus_gossip_service, join_handle, rx_consensus_gossip_messages) = consensus_gossip::spawn( epoch_manager.subscribe(), + tx_hotstuff_events.subscribe(), networking.clone(), rx_consensus_gossip_messages, ); @@ -322,6 +329,7 @@ pub async fn spawn_services( metrics, shutdown.clone(), transaction_executor, + tx_hotstuff_events, consensus_constants.clone(), ) .await; @@ -412,6 +420,7 @@ pub async fn spawn_services( async fn create_registration_file( config: &ApplicationConfig, epoch_manager: &EpochManagerHandle, + sidechain_pk: Option<&RistrettoPublicKey>, keypair: &RistrettoKeypair, ) -> Result<(), anyhow::Error> { let fee_claim_public_key = config.validator_node.fee_claim_public_key.clone(); @@ -420,7 +429,17 @@ async fn create_registration_file( .await .context("set_fee_claim_public_key failed when creating registration file")?; - let signature = ValidatorNodeSignature::sign(keypair.secret_key(), &fee_claim_public_key, b""); + // TODO: this signature can be replayed since it is not bound to any single use data (e.g. epoch). This + // could be used to re-register a validator node after that node has exited. However, this is costly and AFAICS + // could only potentially do reputational damage since an attacker would not be able to operate as the node + // (missed propsals etc). Suggest: perhaps a JSON-rpc call that triggers this file to be re-signed + // with the current epoch. File system access is still required to read the updated signature. + let signature = ValidatorNodeSignature::sign( + keypair.secret_key(), + sidechain_pk, + &fee_claim_public_key, + VnEpoch::zero(), + ); let registration = ValidatorRegistrationFile { signature, @@ -513,10 +532,7 @@ where TTx::Addr: NodeAddressable + Serialize, { // Assume that if the public identity resource exists, then the rest of the state has been bootstrapped - if SubstateRecord::exists( - &**tx, - &VersionedSubstateId::new(PUBLIC_IDENTITY_RESOURCE_ADDRESS.into(), 0), - )? { + if SubstateRecord::exists(&**tx, &VersionedSubstateId::new(PUBLIC_IDENTITY_RESOURCE_ADDRESS, 0))? { return Ok(()); } diff --git a/applications/tari_validator_node/src/config.rs b/applications/tari_validator_node/src/config.rs index e00911b02..4f019231d 100644 --- a/applications/tari_validator_node/src/config.rs +++ b/applications/tari_validator_node/src/config.rs @@ -61,6 +61,15 @@ impl ApplicationConfig { config.validator_node.set_base_path(config.common.base_path()); Ok(config) } + + pub fn get_layer_one_transaction_base_path(&self) -> PathBuf { + if self.validator_node.layer_one_transaction_path.is_absolute() { + return self.validator_node.layer_one_transaction_path.clone(); + } + self.common + .base_path() + .join(&self.validator_node.layer_one_transaction_path) + } } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -107,6 +116,8 @@ pub struct ValidatorNodeConfig { pub template_sidechain_id: Option, /// The burnt utxo sidechain id pub burnt_utxo_sidechain_id: Option, + /// The path to store layer one transactions. 
+ pub layer_one_transaction_path: PathBuf, } impl ValidatorNodeConfig { @@ -150,6 +161,7 @@ impl Default for ValidatorNodeConfig { validator_node_sidechain_id: None, template_sidechain_id: None, burnt_utxo_sidechain_id: None, + layer_one_transaction_path: PathBuf::from("data/layer_one_transactions"), } } } diff --git a/applications/tari_validator_node/src/consensus/mod.rs b/applications/tari_validator_node/src/consensus/mod.rs index dd4954b66..5d3a68bfe 100644 --- a/applications/tari_validator_node/src/consensus/mod.rs +++ b/applications/tari_validator_node/src/consensus/mod.rs @@ -51,7 +51,7 @@ mod spec; pub use block_transaction_executor::*; pub use handle::*; pub use signature_service::*; -use tari_consensus::consensus_constants::ConsensusConstants; +use tari_consensus::{consensus_constants::ConsensusConstants, hotstuff::HotstuffEvent}; use crate::{p2p::NopLogger, transaction_validators::WithContext}; @@ -73,13 +73,13 @@ pub async fn spawn( TariDanTransactionProcessor>, ConsensusTransactionValidator, >, + tx_hotstuff_events: broadcast::Sender, consensus_constants: ConsensusConstants, ) -> (JoinHandle>, ConsensusHandle) { let (tx_new_transaction, rx_new_transactions) = mpsc::channel(10); let leader_strategy = RoundRobinLeaderStrategy::new(); let transaction_pool = TransactionPool::new(); - let (tx_hotstuff_events, _) = broadcast::channel(100); let hs_config = HotstuffConfig { network, diff --git a/applications/tari_validator_node/src/file_l1_submitter.rs b/applications/tari_validator_node/src/file_l1_submitter.rs new file mode 100644 index 000000000..c7be7aaae --- /dev/null +++ b/applications/tari_validator_node/src/file_l1_submitter.rs @@ -0,0 +1,43 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use std::{io, path::PathBuf}; + +use log::*; +use rand::{rngs::OsRng, RngCore}; +use serde::Serialize; +use tari_dan_common_types::layer_one_transaction::LayerOneTransactionDef; +use tari_epoch_manager::traits::LayerOneTransactionSubmitter; +use tokio::fs; + +const LOG_TARGET: &str = "tari::validator_node::file_layer_one_submitter"; + +#[derive(Debug, Clone)] +pub struct FileLayerOneSubmitter { + path: PathBuf, +} + +impl FileLayerOneSubmitter { + pub fn new(path: PathBuf) -> Self { + Self { path } + } +} + +impl LayerOneTransactionSubmitter for FileLayerOneSubmitter { + type Error = io::Error; + + async fn submit_transaction( + &self, + proof: LayerOneTransactionDef, + ) -> Result<(), Self::Error> { + fs::create_dir_all(&self.path).await?; + let id = OsRng.next_u64(); + let file_name = format!("{}-{}.json", proof.proof_type, id); + let path = self.path.join(file_name); + info!(target: LOG_TARGET, "Saving layer one transaction proof to {}", path.display()); + let file = fs::File::create(path).await?; + let mut file = file.into_std().await; + serde_json::to_writer_pretty(&mut file, &proof)?; + Ok(()) + } +} diff --git a/applications/tari_validator_node/src/json_rpc/handlers.rs b/applications/tari_validator_node/src/json_rpc/handlers.rs index 0ab0ee7a2..0e34bdc09 100644 --- a/applications/tari_validator_node/src/json_rpc/handlers.rs +++ b/applications/tari_validator_node/src/json_rpc/handlers.rs @@ -31,7 +31,7 @@ use log::*; use serde_json::{self as json, json}; use tari_base_node_client::{grpc::GrpcBaseNodeClient, BaseNodeClient}; use tari_dan_app_utilities::{keypair::RistrettoKeypair, template_manager::interface::TemplateManagerHandle}; -use tari_dan_common_types::{optional::Optional, public_key_to_peer_id, PeerAddress, SubstateAddress}; +use 
tari_dan_common_types::{optional::Optional, public_key_to_peer_id, Epoch, PeerAddress, SubstateAddress}; use tari_dan_p2p::TariMessagingSpec; use tari_dan_storage::{ consensus_models::{Block, ExecutedTransaction, LeafBlock, QuorumDecision, SubstateRecord, TransactionRecord}, @@ -50,6 +50,8 @@ use tari_validator_node_client::types::{ DryRunTransactionFinalizeResult, GetAllVnsRequest, GetAllVnsResponse, + GetBaseLayerEpochChangesRequest, + GetBaseLayerEpochChangesResponse, GetBlockRequest, GetBlockResponse, GetBlocksCountResponse, @@ -59,6 +61,7 @@ use tari_validator_node_client::types::{ GetCommitteeResponse, GetCommsStatsResponse, GetConnectionsResponse, + GetConsensusStatusResponse, GetEpochManagerStatsResponse, GetFilteredBlocksCountRequest, GetIdentityResponse, @@ -91,6 +94,7 @@ use tari_validator_node_client::types::{ }; use crate::{ + consensus::ConsensusHandle, dry_run_transaction_processor::DryRunTransactionProcessor, json_rpc::jrpc_errors::{internal_error, not_found}, p2p::services::mempool::MempoolHandle, @@ -104,6 +108,7 @@ pub struct JsonRpcHandlers { mempool: MempoolHandle, template_manager: TemplateManagerHandle, epoch_manager: EpochManagerHandle, + consensus: ConsensusHandle, networking: NetworkingHandle, base_node_client: GrpcBaseNodeClient, state_store: SqliteStateStore, @@ -116,6 +121,7 @@ impl JsonRpcHandlers { keypair: services.keypair.clone(), mempool: services.mempool.clone(), epoch_manager: services.epoch_manager.clone(), + consensus: services.consensus_handle.clone(), template_manager: services.template_manager.clone(), networking: services.networking.clone(), base_node_client, @@ -703,22 +709,16 @@ impl JsonRpcHandlers { pub async fn get_shard_key(&self, value: JsonRpcExtractor) -> JrpcResult { let answer_id = value.get_answer_id(); let request = value.parse_params::()?; - if let Ok(shard_key) = self - .base_node_client() - .get_shard_key(request.height, &request.public_key) + let maybe_vn = self + .epoch_manager + .get_our_validator_node(request.epoch) .await - { - Ok(JsonRpcResponse::success(answer_id, GetShardKeyResponse { shard_key })) - } else { - Err(JsonRpcResponse::error( - answer_id, - JsonRpcError::new( - JsonRpcErrorReason::InvalidParams, - "Something went wrong".to_string(), - json::Value::Null, - ), - )) - } + .optional() + .map_err(internal_error(answer_id))?; + + Ok(JsonRpcResponse::success(answer_id, GetShardKeyResponse { + shard_key: maybe_vn.map(|vn| vn.shard_key), + })) } pub async fn get_committee(&self, value: JsonRpcExtractor) -> JrpcResult { @@ -759,6 +759,59 @@ impl JsonRpcHandlers { } } + pub async fn get_base_layer_validator_changes(&self, value: JsonRpcExtractor) -> JrpcResult { + let answer_id = value.get_answer_id(); + let GetBaseLayerEpochChangesRequest { start_epoch, end_epoch } = + value.parse_params::()?; + let mut changes = Vec::new(); + fn convert_change( + change: tari_core::base_node::comms_interface::ValidatorNodeChange, + ) -> types::ValidatorNodeChange { + match change { + tari_core::base_node::comms_interface::ValidatorNodeChange::Add { + registration, + activation_epoch, + minimum_value_promise, + } => types::ValidatorNodeChange::Add { + public_key: registration.public_key().clone(), + activation_epoch: Epoch(activation_epoch.as_u64()), + minimum_value_promise: minimum_value_promise.as_u64(), + }, + tari_core::base_node::comms_interface::ValidatorNodeChange::Remove { public_key } => { + types::ValidatorNodeChange::Remove { + public_key: public_key.clone(), + } + }, + } + } + for epoch in 
start_epoch.as_u64()..=end_epoch.as_u64() { + let epoch = Epoch(epoch); + let vns = self + .base_node_client() + .get_validator_node_changes(epoch, None) + .await + .map_err(internal_error(answer_id))?; + changes.push((epoch, vns.into_iter().map(convert_change).collect())); + } + + Ok(JsonRpcResponse::success(answer_id, GetBaseLayerEpochChangesResponse { + changes, + })) + } + + pub async fn get_consensus_status(&self, value: JsonRpcExtractor) -> JrpcResult { + let answer_id = value.get_answer_id(); + let epoch = self.consensus.current_epoch(); + let height = self.consensus.current_view().get_height(); + let state = self.consensus.get_current_state(); + + Ok(JsonRpcResponse::success(answer_id, GetConsensusStatusResponse { + epoch, + height, + state: format!("{:?}", state), + })) + } + pub async fn get_validator_fees(&self, value: JsonRpcExtractor) -> JrpcResult { let answer_id = value.get_answer_id(); let request = value.parse_params::()?; diff --git a/applications/tari_validator_node/src/json_rpc/server.rs b/applications/tari_validator_node/src/json_rpc/server.rs index dcfb68cc1..e392b92f5 100644 --- a/applications/tari_validator_node/src/json_rpc/server.rs +++ b/applications/tari_validator_node/src/json_rpc/server.rs @@ -91,6 +91,8 @@ async fn handler(Extension(handlers): Extension>, value: Js "get_shard_key" => handlers.get_shard_key(value).await, "get_committee" => handlers.get_committee(value).await, "get_all_vns" => handlers.get_all_vns(value).await, + "get_base_layer_validator_changes" => handlers.get_base_layer_validator_changes(value).await, + "get_consensus_status" => handlers.get_consensus_status(value).await, // "get_network_committees" => handlers.get_network_committees(value).await, "get_fees" => handlers.get_validator_fees(value).await, // Comms diff --git a/applications/tari_validator_node/src/lib.rs b/applications/tari_validator_node/src/lib.rs index 8330abe0b..bce00550a 100644 --- a/applications/tari_validator_node/src/lib.rs +++ b/applications/tari_validator_node/src/lib.rs @@ -35,6 +35,7 @@ mod p2p; mod substate_resolver; mod virtual_substate; +mod file_l1_submitter; pub mod transaction_validators; mod validator; mod validator_registration_file; @@ -49,7 +50,7 @@ use tari_common::{ exit_codes::{ExitCode, ExitError}, }; use tari_consensus::consensus_constants::ConsensusConstants; -use tari_dan_app_utilities::keypair::setup_keypair_prompt; +use tari_dan_app_utilities::{common::verify_correct_network, keypair::setup_keypair_prompt}; use tari_dan_common_types::SubstateAddress; use tari_dan_storage::global::DbFactory; use tari_dan_storage_sqlite::SqliteDbFactory; @@ -119,7 +120,8 @@ pub async fn run_validator_node( let metrics_registry = create_metrics_registry(keypair.public_key()); let consensus_constants = ConsensusConstants::from(config.network); - let base_node_client = create_base_layer_client(config).await?; + let mut base_node_client = create_base_layer_client(config).await?; + verify_correct_network(&mut base_node_client, config.network).await?; let services = spawn_services( config, shutdown_signal.clone(), diff --git a/applications/tari_validator_node/src/p2p/rpc/service_impl.rs b/applications/tari_validator_node/src/p2p/rpc/service_impl.rs index 7094bb478..23ac76148 100644 --- a/applications/tari_validator_node/src/p2p/rpc/service_impl.rs +++ b/applications/tari_validator_node/src/p2p/rpc/service_impl.rs @@ -96,7 +96,7 @@ impl ValidatorNodeRpcServiceImpl { } } -#[async_trait::async_trait] +#[tari_rpc_framework::async_trait] impl ValidatorNodeRpcService for 
ValidatorNodeRpcServiceImpl { async fn submit_transaction( &self, @@ -299,9 +299,9 @@ impl ValidatorNodeRpcService for ValidatorNodeRpcServiceImpl { .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; let Some(block_id) = block_ids.pop() else { - return Err(RpcStatus::not_found( - "Block not found with epoch={epoch},height={height}", - )); + return Err(RpcStatus::not_found(format!( + "Block not found with epoch={epoch},height=0" + ))); }; if !block_ids.is_empty() { return Err(RpcStatus::conflict(format!( diff --git a/applications/tari_validator_node/src/p2p/services/consensus_gossip/handle.rs b/applications/tari_validator_node/src/p2p/services/consensus_gossip/handle.rs index a700c32b9..ddeed9a28 100644 --- a/applications/tari_validator_node/src/p2p/services/consensus_gossip/handle.rs +++ b/applications/tari_validator_node/src/p2p/services/consensus_gossip/handle.rs @@ -49,7 +49,7 @@ impl ConsensusGossipHandle { } } - pub async fn multicast( + pub async fn publish( &mut self, shard_group: ShardGroup, message: HotstuffMessage, diff --git a/applications/tari_validator_node/src/p2p/services/consensus_gossip/initializer.rs b/applications/tari_validator_node/src/p2p/services/consensus_gossip/initializer.rs index 07e0ec77c..0071204b6 100644 --- a/applications/tari_validator_node/src/p2p/services/consensus_gossip/initializer.rs +++ b/applications/tari_validator_node/src/p2p/services/consensus_gossip/initializer.rs @@ -22,6 +22,7 @@ use libp2p::{gossipsub, PeerId}; use log::*; +use tari_consensus::hotstuff::HotstuffEvent; use tari_dan_p2p::{proto, TariMessagingSpec}; use tari_epoch_manager::EpochManagerEvent; use tari_networking::NetworkingHandle; @@ -37,6 +38,7 @@ const LOG_TARGET: &str = "tari::validator_node::consensus_gossip::initializer"; pub fn spawn( epoch_manager_events: broadcast::Receiver, + consensus_events: broadcast::Receiver, networking: NetworkingHandle, rx_gossip: mpsc::UnboundedReceiver<(PeerId, gossipsub::Message)>, ) -> ( @@ -46,8 +48,13 @@ pub fn spawn( ) { let (tx_consensus_gossip, rx_consensus_gossip) = mpsc::channel(10); - let consensus_gossip = - ConsensusGossipService::new(epoch_manager_events, networking.clone(), rx_gossip, tx_consensus_gossip); + let consensus_gossip = ConsensusGossipService::new( + epoch_manager_events, + consensus_events, + networking.clone(), + rx_gossip, + tx_consensus_gossip, + ); let handle = ConsensusGossipHandle::new(networking); let join_handle = task::spawn(consensus_gossip.run()); diff --git a/applications/tari_validator_node/src/p2p/services/consensus_gossip/service.rs b/applications/tari_validator_node/src/p2p/services/consensus_gossip/service.rs index 874717cfe..2f4e305c1 100644 --- a/applications/tari_validator_node/src/p2p/services/consensus_gossip/service.rs +++ b/applications/tari_validator_node/src/p2p/services/consensus_gossip/service.rs @@ -22,6 +22,7 @@ use libp2p::{gossipsub, PeerId}; use log::*; +use tari_consensus::hotstuff::HotstuffEvent; use tari_dan_common_types::ShardGroup; use tari_dan_p2p::{proto, TariMessagingSpec}; use tari_epoch_manager::EpochManagerEvent; @@ -38,6 +39,7 @@ pub const TOPIC_PREFIX: &str = "consensus"; #[derive(Debug)] pub(super) struct ConsensusGossipService { epoch_manager_events: broadcast::Receiver, + consensus_events: broadcast::Receiver, is_subscribed: Option, networking: NetworkingHandle, codec: ProstCodec, @@ -48,12 +50,14 @@ pub(super) struct ConsensusGossipService { impl ConsensusGossipService { pub fn new( epoch_manager_events: broadcast::Receiver, + consensus_events: broadcast::Receiver, 
networking: NetworkingHandle, rx_gossip: mpsc::UnboundedReceiver<(PeerId, gossipsub::Message)>, tx_consensus_gossip: mpsc::Sender<(PeerId, proto::consensus::HotStuffMessage)>, ) -> Self { Self { epoch_manager_events, + consensus_events, is_subscribed: None, networking, codec: ProstCodec::default(), @@ -63,17 +67,25 @@ impl ConsensusGossipService { } pub async fn run(mut self) -> anyhow::Result<()> { + let mut initial_subscription = false; loop { tokio::select! { + Ok(HotstuffEvent::EpochChanged{ registered_shard_group, .. }) = self.consensus_events.recv() => { + if let Some(shard_group) = registered_shard_group{ + self.subscribe(shard_group).await?; + } + }, Some(msg) = self.rx_gossip.recv() => { if let Err(err) = self.handle_incoming_gossip_message(msg).await { warn!(target: LOG_TARGET, "Consensus gossip service error: {}", err); } }, - Ok(event) = self.epoch_manager_events.recv() => { - let EpochManagerEvent::EpochChanged{ registered_shard_group, ..} = event ; - if let Some(shard_group) = registered_shard_group{ - self.subscribe(shard_group).await?; + Ok(EpochManagerEvent::EpochChanged{ registered_shard_group, .. }) = self.epoch_manager_events.recv() => { + if !initial_subscription { + if let Some(shard_group) = registered_shard_group { + self.subscribe(shard_group).await?; + initial_subscription = true; + } } }, else => { diff --git a/applications/tari_validator_node/src/p2p/services/mempool/service.rs b/applications/tari_validator_node/src/p2p/services/mempool/service.rs index 45a026c22..4c5c0cba0 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/service.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/service.rs @@ -24,11 +24,12 @@ use std::{collections::HashSet, fmt::Display, iter}; use libp2p::{gossipsub, PeerId}; use log::*; +use tari_consensus::hotstuff::HotstuffEvent; use tari_dan_common_types::{optional::Optional, NumPreshards, PeerAddress, ShardGroup, ToSubstateAddress}; use tari_dan_p2p::{DanMessage, NewTransactionMessage, TariMessagingSpec}; use tari_dan_storage::{consensus_models::TransactionRecord, StateStore}; use tari_engine_types::commit_result::RejectReason; -use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerEvent, EpochManagerReader}; +use tari_epoch_manager::{base_layer::EpochManagerHandle, EpochManagerReader}; use tari_networking::NetworkingHandle; use tari_state_store_sqlite::SqliteStateStore; use tari_transaction::{Transaction, TransactionId}; @@ -90,7 +91,7 @@ where TValidator: Validator anyhow::Result<()> { - let mut events = self.epoch_manager.subscribe(); + let mut consensus_events = self.consensus_handle.subscribe_to_hotstuff_events(); loop { tokio::select! 
{ @@ -100,11 +101,13 @@ where TValidator: Validator { - let EpochManagerEvent::EpochChanged { epoch, registered_shard_group} = event; + Ok(HotstuffEvent::EpochChanged { epoch, registered_shard_group}) = consensus_events.recv() => { if let Some(shard_group) = registered_shard_group { info!(target: LOG_TARGET, "Mempool service subscribing transaction messages for {shard_group} in {epoch}"); self.gossip.subscribe(shard_group).await?; + } else { + info!(target: LOG_TARGET, "Not registered for epoch {epoch}, unsubscribing from gossip"); + self.gossip.unsubscribe().await?; } }, diff --git a/applications/tari_validator_node/src/p2p/services/mempool/traits.rs b/applications/tari_validator_node/src/p2p/services/mempool/traits.rs index 139acb9f4..ba39cdce8 100644 --- a/applications/tari_validator_node/src/p2p/services/mempool/traits.rs +++ b/applications/tari_validator_node/src/p2p/services/mempool/traits.rs @@ -1,9 +1,8 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::collections::HashSet; +use std::{collections::HashSet, future::Future}; -use async_trait::async_trait; use indexmap::IndexMap; use tari_dan_common_types::{Epoch, SubstateRequirement}; use tari_engine_types::{ @@ -17,20 +16,19 @@ pub struct ResolvedSubstates { pub unresolved_foreign: HashSet, } -#[async_trait] pub trait SubstateResolver { type Error: Send + Sync + 'static; fn try_resolve_local(&self, transaction: &Transaction) -> Result; - async fn try_resolve_foreign( + fn try_resolve_foreign( &self, requested_substates: &HashSet, - ) -> Result, Self::Error>; + ) -> impl Future, Self::Error>> + Send; - async fn resolve_virtual_substates( + fn resolve_virtual_substates( &self, transaction: &Transaction, current_epoch: Epoch, - ) -> Result; + ) -> impl Future> + Send; } diff --git a/applications/tari_validator_node/src/p2p/services/messaging/inbound.rs b/applications/tari_validator_node/src/p2p/services/messaging/inbound.rs index f8104b6a8..6e0e1df28 100644 --- a/applications/tari_validator_node/src/p2p/services/messaging/inbound.rs +++ b/applications/tari_validator_node/src/p2p/services/messaging/inbound.rs @@ -1,7 +1,6 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use async_trait::async_trait; use libp2p::PeerId; use tari_consensus::{messages::HotstuffMessage, traits::InboundMessagingError}; use tari_dan_common_types::PeerAddress; @@ -53,7 +52,6 @@ impl ConsensusInboundMessaging { } } -#[async_trait] impl tari_consensus::traits::InboundMessaging for ConsensusInboundMessaging { diff --git a/applications/tari_validator_node/src/p2p/services/messaging/outbound.rs b/applications/tari_validator_node/src/p2p/services/messaging/outbound.rs index f822acd02..b029e6687 100644 --- a/applications/tari_validator_node/src/p2p/services/messaging/outbound.rs +++ b/applications/tari_validator_node/src/p2p/services/messaging/outbound.rs @@ -23,7 +23,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use async_trait::async_trait; use tari_consensus::{messages::HotstuffMessage, traits::OutboundMessagingError}; use tari_dan_common_types::{PeerAddress, ShardGroup}; use tari_dan_p2p::{proto, TariMessagingSpec}; @@ -60,7 +59,6 @@ impl ConsensusOutboundMessaging { } } -#[async_trait] impl tari_consensus::traits::OutboundMessaging for ConsensusOutboundMessaging { @@ -104,15 +102,12 @@ impl tari_consensus::traits::OutboundMessaging Ok(()) } - async fn multicast<'a, T>(&mut self, shard_group: ShardGroup, message: T) -> Result<(), OutboundMessagingError> - where - Self::Addr: 'a, - T: Into + Send, - { + async fn multicast(&mut self, shard_group: ShardGroup, message: T) -> Result<(), OutboundMessagingError> + where T: Into + Send { let message = message.into(); self.consensus_gossip - .multicast(shard_group, message) + .publish(shard_group, message) .await .map_err(OutboundMessagingError::from_error)?; diff --git a/applications/tari_validator_node/src/substate_resolver.rs b/applications/tari_validator_node/src/substate_resolver.rs index 27ba2be0c..7c3f5cabf 100644 --- a/applications/tari_validator_node/src/substate_resolver.rs +++ b/applications/tari_validator_node/src/substate_resolver.rs @@ -3,7 +3,6 @@ use std::{collections::HashSet, time::Instant}; -use async_trait::async_trait; use indexmap::IndexMap; use log::*; use tari_common_types::types::PublicKey; @@ -217,7 +216,6 @@ where } } -#[async_trait] impl SubstateResolver for TariSubstateResolver where diff --git a/applications/tari_validator_node_cli/src/command/transaction.rs b/applications/tari_validator_node_cli/src/command/transaction.rs index 2e7ebf34c..cf795baae 100644 --- a/applications/tari_validator_node_cli/src/command/transaction.rs +++ b/applications/tari_validator_node_cli/src/command/transaction.rs @@ -29,7 +29,12 @@ use std::{ use anyhow::anyhow; use clap::{Args, Subcommand}; -use tari_dan_common_types::{optional::Optional, SubstateAddress, SubstateRequirement}; +use tari_dan_common_types::{ + option::{DisplayCont, DisplayContainer}, + optional::Optional, + SubstateAddress, + SubstateRequirement, +}; use tari_dan_engine::abi::Type; use tari_engine_types::{ commit_result::{ExecuteResult, FinalizeResult, RejectReason, TransactionResult}, @@ -491,44 +496,44 @@ fn summarize_finalize_result(finalize: &FinalizeResult) { } fn display_vec(writer: &mut W, ty: &Type, result: &InstructionResult) -> fmt::Result { - fn stringify_slice(slice: &[T]) -> String { - slice.iter().map(|v| v.to_string()).collect::>().join(", ") + fn display_slice(slice: &[T]) -> DisplayCont<&[T]> { + slice.display() } match &ty { Type::Unit => {}, Type::Bool => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::I8 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::I16 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::I32 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::I64 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::I128 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", 
display_slice(&result.decode::>().unwrap()))?; }, Type::U8 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::U16 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::U32 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::U64 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::U128 => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::String => { write!(writer, "{}", result.decode::>().unwrap().join(", "))?; @@ -550,13 +555,13 @@ fn display_vec(writer: &mut W, ty: &Type, result: &InstructionRes write!(writer, "{}", str)?; }, Type::Other { name } if name == "Amount" => { - write!(writer, "{}", stringify_slice(&result.decode::>().unwrap()))?; + write!(writer, "{}", display_slice(&result.decode::>().unwrap()))?; }, Type::Other { name } if name == "NonFungibleId" => { write!( writer, "{}", - stringify_slice(&result.decode::>().unwrap()) + display_slice(&result.decode::>().unwrap()) )?; }, Type::Other { .. } => { diff --git a/applications/tari_validator_node_web_ui/src/App.tsx b/applications/tari_validator_node_web_ui/src/App.tsx index e1e8debf0..3c2a1fcc2 100644 --- a/applications/tari_validator_node_web_ui/src/App.tsx +++ b/applications/tari_validator_node_web_ui/src/App.tsx @@ -163,7 +163,7 @@ export default function App() { useEffect(() => { if (epoch !== undefined && identity !== undefined) { // The *10 is from the hardcoded constant in VN. - getShardKey({ height: epoch.current_epoch * 10, public_key: identity.public_key }).then((response) => { + getShardKey({ epoch: epoch.current_epoch, public_key: identity.public_key }).then((response) => { setShardKey(response.shard_key); }); } diff --git a/applications/tari_validator_node_web_ui/src/routes/Blocks/BlockDetails.tsx b/applications/tari_validator_node_web_ui/src/routes/Blocks/BlockDetails.tsx index 09283970e..e7e7b2d03 100644 --- a/applications/tari_validator_node_web_ui/src/routes/Blocks/BlockDetails.tsx +++ b/applications/tari_validator_node_web_ui/src/routes/Blocks/BlockDetails.tsx @@ -314,7 +314,7 @@ export default function BlockDetails() { {mintedUtxos.map((utxo, i) => (
- Unclaimed UTXO: {JSON.stringify(utxo.substate_id)} + Unclaimed UTXO: {utxo.commitment}
))}
diff --git a/applications/tari_watcher/Cargo.toml b/applications/tari_watcher/Cargo.toml index 2437e9a8a..ab425c570 100644 --- a/applications/tari_watcher/Cargo.toml +++ b/applications/tari_watcher/Cargo.toml @@ -17,7 +17,9 @@ tari_core = { workspace = true } # Used for VN registration signature tari_crypto = { workspace = true } # Used for `.to_vec()` in registration request tari_common = { workspace = true } tari_common_types = { workspace = true } +tari_dan_common_types = { workspace = true } tari_shutdown = { workspace = true } +tari_sidechain = { workspace = true } clap = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } anyhow = { workspace = true } diff --git a/applications/tari_watcher/src/cli.rs b/applications/tari_watcher/src/cli.rs index deee8e7f8..61bb5b0ac 100644 --- a/applications/tari_watcher/src/cli.rs +++ b/applications/tari_watcher/src/cli.rs @@ -1,19 +1,15 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::path::PathBuf; +use std::{ + env, + path::{Path, PathBuf}, +}; use clap::Parser; +use tari_common::configuration::Network; -use crate::{ - config::Config, - constants::{ - DEFAULT_VALIDATOR_DIR, - DEFAULT_VALIDATOR_KEY_PATH, - DEFAULT_WATCHER_BASE_PATH, - DEFAULT_WATCHER_CONFIG_PATH, - }, -}; +use crate::{config::Config, constants::DEFAULT_VALIDATOR_DIR}; #[derive(Clone, Debug, Parser)] pub struct Cli { @@ -29,20 +25,44 @@ impl Cli { } pub fn get_config_path(&self) -> PathBuf { - self.common.config_path.clone() + self.relative_to_base_path(&self.common.config_path) + } + + pub fn get_validator_node_base_dir(&self) -> PathBuf { + abs_or_with_base_dir(&self.common.validator_dir) } + + pub fn get_base_path(&self) -> PathBuf { + abs_or_with_base_dir(&self.common.base_dir) + } + + fn relative_to_base_path>(&self, path: P) -> PathBuf { + let path = path.as_ref(); + if path.is_absolute() { + return path.to_path_buf(); + } + self.get_base_path().join(path) + } +} + +fn abs_or_with_base_dir>(path: P) -> PathBuf { + let p = path.as_ref(); + if p.is_absolute() { + return p.to_path_buf(); + } + env::current_dir().expect("Failed to get current directory").join(p) } #[derive(Debug, Clone, clap::Args)] pub struct CommonCli { - #[clap(short = 'b', long, parse(from_os_str), default_value = DEFAULT_WATCHER_BASE_PATH)] + #[clap(short = 'b', long, parse(from_os_str), default_value = "data/watcher/")] pub base_dir: PathBuf, - #[clap(short = 'c', long, parse(from_os_str), default_value = DEFAULT_WATCHER_CONFIG_PATH)] + #[clap(short = 'c', long, parse(from_os_str), default_value = "config.toml")] pub config_path: PathBuf, - #[clap(short = 'k', long, parse(from_os_str), default_value = DEFAULT_VALIDATOR_KEY_PATH)] - pub key_path: PathBuf, #[clap(short = 'v', long, parse(from_os_str), default_value = DEFAULT_VALIDATOR_DIR)] pub validator_dir: PathBuf, + #[clap(short = 'n', long)] + pub network: Option, } #[derive(Clone, Debug, clap::Subcommand)] @@ -60,6 +80,9 @@ pub struct InitArgs { #[clap(long)] /// Disable auto restart of the validator node pub no_auto_restart: bool, + + #[clap(long, short = 'f')] + pub force: bool, } impl InitArgs { diff --git a/applications/tari_watcher/src/config.rs b/applications/tari_watcher/src/config.rs index 466a1656f..daa624153 100644 --- a/applications/tari_watcher/src/config.rs +++ b/applications/tari_watcher/src/config.rs @@ -3,6 +3,7 @@ use std::path::PathBuf; +use tari_common::configuration::Network; use tokio::io::{self, AsyncWriteExt}; use url::Url; @@ -13,6 +14,8 @@ use 
crate::{ #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct Config { + /// The Tari network the watcher and validator node operate on + pub network: Network, /// Allow watcher to submit a new validator node registration transaction initially and before /// the current registration expires pub auto_register: bool, @@ -29,10 +32,6 @@ pub struct Config { /// The base directory of the watcher with configuration and data files pub base_dir: PathBuf, - /// The path of the validator node registration file, containing signed information required to - /// submit a registration transaction on behalf of the node - pub vn_registration_file: PathBuf, - /// The path of the validator node base directory. This directory is automatically created when starting a new VN. pub vn_base_dir: PathBuf, @@ -52,6 +51,19 @@ impl Config { writer.write_all(toml.as_bytes()).await?; Ok(()) } + + pub fn get_registration_file(&self) -> PathBuf { + self.vn_base_dir + .join(self.network.as_key_str()) + .join("registration.json") + } + + pub fn get_layer_one_transaction_path(&self) -> PathBuf { + self.vn_base_dir + .join(self.network.as_key_str()) + .join("data") + .join("layer_one_transactions") + } } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct Channels { @@ -71,17 +83,17 @@ pub struct Channels { pub fn get_base_config(cli: &Cli) -> anyhow::Result { let base_dir = cli.common.base_dir.clone(); - let vn_registration_file = base_dir.join(cli.common.key_path.clone()); - let vn_base_dir = base_dir.join(cli.common.validator_dir.clone()); + let vn_base_dir = cli.get_validator_node_base_dir(); + let network = cli.common.network.unwrap_or(Network::Esmeralda); Ok(Config { + network, auto_register: true, auto_restart: true, base_node_grpc_url: DEFAULT_BASE_NODE_GRPC_URL.parse()?, base_wallet_grpc_url: DEFAULT_BASE_WALLET_GRPC_URL.parse()?, base_dir: base_dir.to_path_buf(), sidechain_id: None, - vn_registration_file, vn_base_dir, validator_node_executable_path: DEFAULT_VALIDATOR_NODE_BINARY_PATH.into(), channel_config: Channels { diff --git a/applications/tari_watcher/src/constants.rs b/applications/tari_watcher/src/constants.rs index 2b99ad447..a4fc0a089 100644 --- a/applications/tari_watcher/src/constants.rs +++ b/applications/tari_watcher/src/constants.rs @@ -1,11 +1,8 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -pub const DEFAULT_WATCHER_BASE_PATH: &str = "data/watcher/"; -pub const DEFAULT_WATCHER_CONFIG_PATH: &str = "data/watcher/config.toml"; -pub const DEFAULT_VALIDATOR_PID_PATH: &str = "data/watcher/validator.pid"; -pub const DEFAULT_VALIDATOR_DIR: &str = "data/vn1"; -pub const DEFAULT_VALIDATOR_KEY_PATH: &str = "data/vn1/esmeralda/registration.json"; +pub const DEFAULT_VALIDATOR_PID_PATH: &str = "validator.pid"; +pub const DEFAULT_VALIDATOR_DIR: &str = "validator"; pub const DEFAULT_VALIDATOR_NODE_BINARY_PATH: &str = "target/release/tari_validator_node"; pub const DEFAULT_BASE_NODE_GRPC_URL: &str = "http://127.0.0.1:12001"; // note: protocol pub const DEFAULT_BASE_WALLET_GRPC_URL: &str = "http://127.0.0.1:12003"; // note: protocol diff --git a/applications/tari_watcher/src/helpers.rs b/applications/tari_watcher/src/helpers.rs index ec3320a4d..4260db297 100644 --- a/applications/tari_watcher/src/helpers.rs +++ b/applications/tari_watcher/src/helpers.rs @@ -6,23 +6,21 @@ use std::{ path::{Path, PathBuf}, }; +use anyhow::anyhow; use minotari_app_grpc::tari_rpc::GetActiveValidatorNodesResponse; use tari_common_types::types::PublicKey; use 
tari_core::transactions::transaction_components::ValidatorNodeSignature; -use tari_crypto::{ristretto::RistrettoPublicKey, tari_utilities::ByteArray}; +use tari_crypto::tari_utilities::ByteArray; use tokio::fs; use crate::config::Config; pub async fn read_config_file(path: PathBuf) -> anyhow::Result<Config> { - let content = fs::read_to_string(&path).await.map_err(|_| { - format!( - "Failed to read config file at {}", - path.into_os_string().into_string().unwrap() - ) - }); + let content = fs::read_to_string(&path) + .await + .map_err(|_| anyhow!("Failed to read config file at {}", path.display()))?; - let config = toml::from_str(&content.unwrap())?; + let config = toml::from_str(&content)?; Ok(config) } @@ -59,7 +57,3 @@ pub fn to_vn_public_keys(vns: Vec<GetActiveValidatorNodesResponse>) -> Vec<PublicKey> { -pub fn contains_key(vns: Vec<RistrettoPublicKey>, needle: PublicKey) -> bool { - vns.iter().any(|vn| vn.eq(&needle)) -} diff --git a/applications/tari_watcher/src/logger.rs b/applications/tari_watcher/src/logger.rs index db4044998..69b804de2 100644 --- a/applications/tari_watcher/src/logger.rs +++ b/applications/tari_watcher/src/logger.rs @@ -14,8 +14,9 @@ pub fn init_logger() -> Result<(), log::SetLoggerError> { } let colors = fern::colors::ColoredLevelConfig::new() - .info(fern::colors::Color::Green) - .debug(fern::colors::Color::Yellow) + .info(fern::colors::Color::Blue) + .debug(fern::colors::Color::White) + .warn(fern::colors::Color::Yellow) .error(fern::colors::Color::Red); fern::Dispatch::new() .format(move |out, message, record| { diff --git a/applications/tari_watcher/src/main.rs b/applications/tari_watcher/src/main.rs index c72839fc3..db7452b53 100644 --- a/applications/tari_watcher/src/main.rs +++ b/applications/tari_watcher/src/main.rs @@ -1,15 +1,14 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use anyhow::{anyhow, Context}; -use registration::registration_loop; +use anyhow::{anyhow, bail, Context}; use tari_shutdown::{Shutdown, ShutdownSignal}; use tokio::{fs, task::JoinHandle}; +use transaction_worker::worker_loop; use crate::{ cli::{Cli, Commands}, config::{get_base_config, Config}, - constants::DEFAULT_WATCHER_BASE_PATH, helpers::read_config_file, logger::init_logger, manager::{start_receivers, ManagerHandle, ProcessManager}, @@ -27,22 +26,31 @@ mod manager; mod minotari; mod monitoring; mod process; -mod registration; mod shutdown; +mod transaction_worker; #[tokio::main] async fn main() -> anyhow::Result<()> { let cli = Cli::init(); let config_path = cli.get_config_path(); - let config_path = config_path - .canonicalize() - .context("Failed to canonicalize config path")?; + if config_path.is_dir() { + bail!( + "Config path '{}' points to a directory, expected a file", + config_path.display() + ); + } init_logger()?; match cli.command { Commands::Init(ref args) => { + if !args.force && config_path.exists() { + bail!( + "Config file already exists at {} (use --force to overwrite)", + config_path.display() + ); + } // set by default in CommonCli let parent = config_path.parent().context("parent path")?; fs::create_dir_all(parent).await?; @@ -72,48 +80,44 @@ async fn main() -> anyhow::Result<()> { } async fn start(config: Config) -> anyhow::Result<()> { - let shutdown = Shutdown::new(); + let mut shutdown = Shutdown::new(); let signal = shutdown.to_signal().select(exit_signal()?); - fs::create_dir_all(config.base_dir.join(DEFAULT_WATCHER_BASE_PATH)) + fs::create_dir_all(&config.base_dir) .await .context("create watcher base path")?; - create_pid_file( - config.base_dir.join(DEFAULT_WATCHER_BASE_PATH).join("watcher.pid"), -
std::process::id(), - ) - .await?; - let handlers = spawn_manager(config.clone(), shutdown.to_signal(), shutdown).await?; - let manager_handle = handlers.manager; - let task_handle = handlers.task; + create_pid_file(config.base_dir.join("watcher.pid"), std::process::id()).await?; + let Handlers { manager_handle, task } = spawn_manager(config.clone(), shutdown.to_signal()).await?; tokio::select! { _ = signal => { log::info!("Shutting down"); }, - result = task_handle => { + result = task => { result?; log::info!("Process manager exited"); }, - Err(err) = registration_loop(config, manager_handle) => { + Err(err) = worker_loop(config, manager_handle) => { log::error!("Registration loop exited with error {err}"); }, } + shutdown.trigger(); + Ok(()) } struct Handlers { - manager: ManagerHandle, + manager_handle: ManagerHandle, task: JoinHandle<()>, } -async fn spawn_manager(config: Config, shutdown: ShutdownSignal, trigger: Shutdown) -> anyhow::Result { - let (manager, manager_handle) = ProcessManager::new(config, shutdown, trigger); +async fn spawn_manager(config: Config, shutdown: ShutdownSignal) -> anyhow::Result { + let (manager, manager_handle) = ProcessManager::new(config, shutdown); let cr = manager.start_request_handler().await?; start_receivers(cr.rx_log, cr.rx_alert, cr.cfg_alert).await; Ok(Handlers { - manager: manager_handle, + manager_handle, task: cr.task, }) } diff --git a/applications/tari_watcher/src/manager.rs b/applications/tari_watcher/src/manager.rs index 014bed09a..c6b0f5c98 100644 --- a/applications/tari_watcher/src/manager.rs +++ b/applications/tari_watcher/src/manager.rs @@ -2,18 +2,11 @@ // SPDX-License-Identifier: BSD-3-Clause use log::*; -use minotari_app_grpc::tari_rpc::{ - self as grpc, - ConsensusConstants, - GetActiveValidatorNodesResponse, - RegisterValidatorNodeResponse, -}; -use tari_shutdown::{Shutdown, ShutdownSignal}; +use minotari_app_grpc::tari_rpc::{GetActiveValidatorNodesResponse, RegisterValidatorNodeResponse}; +use tari_dan_common_types::layer_one_transaction::LayerOneTransactionDef; +use tari_shutdown::ShutdownSignal; use tokio::{ - sync::{ - mpsc::{self, Receiver}, - oneshot, - }, + sync::{mpsc, oneshot}, task::JoinHandle, }; @@ -27,29 +20,27 @@ use crate::{ pub struct ProcessManager { pub config: Config, pub shutdown_signal: ShutdownSignal, // listen for keyboard exit signal - pub trigger_signal: Shutdown, // triggered when validator auto-restart is disabled pub rx_request: mpsc::Receiver, pub chain: MinotariNodes, } pub struct ChannelReceivers { - pub rx_log: Receiver, - pub rx_alert: Receiver, + pub rx_log: mpsc::Receiver, + pub rx_alert: mpsc::Receiver, pub cfg_alert: Channels, pub task: JoinHandle<()>, } impl ProcessManager { - pub fn new(config: Config, shutdown_signal: ShutdownSignal, trigger_signal: Shutdown) -> (Self, ManagerHandle) { + pub fn new(config: Config, shutdown_signal: ShutdownSignal) -> (Self, ManagerHandle) { let (tx_request, rx_request) = mpsc::channel(1); let this = Self { shutdown_signal, - trigger_signal, rx_request, chain: MinotariNodes::new( config.base_node_grpc_url.clone(), config.base_wallet_grpc_url.clone(), - config.vn_registration_file.clone(), + config.get_registration_file(), ), config, }; @@ -61,8 +52,6 @@ impl ProcessManager { // clean_stale_pid_file(self.base_dir.clone().join(DEFAULT_VALIDATOR_PID_PATH)).await?; - self.chain.bootstrap().await?; - let cc = self.start_child_process().await; info!("Setup completed: connected to base node and wallet, ready to receive requests"); @@ -70,58 +59,8 @@ impl 
ProcessManager { loop { tokio::select! { Some(req) = self.rx_request.recv() => { - match req { - ManagerRequest::GetTipInfo { reply } => { - let response = match self.chain.get_tip_status().await { - Ok(resp) => resp, - Err(e) => { - error!("Failed to get tip status: {}", e); - continue; - } - }; - - drop(reply.send(Ok(response))); - } - ManagerRequest::GetActiveValidatorNodes { reply } => { - let response = match self.chain.get_active_validator_nodes().await { - Ok(resp) => resp, - Err(e) => { - error!("Failed to get active validator nodes: {}", e); - continue; - } - }; - drop(reply.send(Ok(response))); - } - ManagerRequest::RegisterValidatorNode { block, reply } => { - let response = match self.chain.register_validator_node().await { - Ok(resp) => resp, - Err(e) => { - error!("Failed to register validator node: {}", e); - continue; - } - }; - - // send registration response to logger - if let Err(e) = cc.tx_log.send(ProcessStatus::Submitted(Transaction::new(response.clone(), block))).await { - error!("Failed to send node registration update to monitoring: {}", e); - } - // send registration response to alerting - if let Err(e) = cc.tx_alert.send(ProcessStatus::Submitted(Transaction::new(response.clone(), block))).await { - error!("Failed to send node registration update to alerting: {}", e); - } - - drop(reply.send(Ok(response))); - }, - ManagerRequest::GetConsensusConstants { block, reply } => { - let response = match self.chain.get_consensus_constants(block).await { - Ok(resp) => resp, - Err(e) => { - error!("Failed to get consensus constants: {}", e); - continue; - } - }; - drop(reply.send(Ok(response))); - } + if let Err(err) = self.handle_request(req, &cc.tx_log, &cc.tx_alert).await { + error!("Error handling request: {}", err); } } @@ -141,6 +80,58 @@ impl ProcessManager { }) } + async fn handle_request( + &mut self, + req: ManagerRequest, + tx_log: &mpsc::Sender, + tx_alert: &mpsc::Sender, + ) -> anyhow::Result<()> { + match req { + ManagerRequest::GetTipInfo { reply } => { + let response = self.chain.get_tip_status().await?; + drop(reply.send(Ok(response))); + }, + ManagerRequest::GetActiveValidatorNodes { reply } => { + let response = self.chain.get_active_validator_nodes().await; + drop(reply.send(response)); + }, + ManagerRequest::RegisterValidatorNode { block, reply } => { + let response = self.chain.register_validator_node().await; + + if let Ok(ref response) = response { + // send registration response to logger + if let Err(e) = tx_log + .send(ProcessStatus::Submitted(Transaction::new( + response.transaction_id, + block, + ))) + .await + { + error!("Failed to send node registration update to monitoring: {}", e); + } + // send registration response to alerting + if let Err(e) = tx_alert + .send(ProcessStatus::Submitted(Transaction::new( + response.transaction_id, + block, + ))) + .await + { + error!("Failed to send node registration update to alerting: {}", e); + } + } + + drop(reply.send(response)); + }, + ManagerRequest::SubmitTransaction { transaction_def, reply } => { + let response = self.chain.submit_transaction(transaction_def).await; + let _ignore = reply.send(response); + }, + } + + Ok(()) + } + async fn start_child_process(&self) -> ChildChannel { let vn_binary_path = self.config.validator_node_executable_path.clone(); let vn_base_dir = self.config.base_dir.join(self.config.vn_base_dir.clone()); @@ -153,7 +144,7 @@ impl ProcessManager { self.config.base_node_grpc_url.clone(), self.config.channel_config.clone(), self.config.auto_restart, - 
self.trigger_signal.clone(), + self.config.network, ) .await; if cc.is_none() { @@ -189,14 +180,14 @@ pub enum ManagerRequest { GetActiveValidatorNodes { reply: Reply<Vec<GetActiveValidatorNodesResponse>>, }, - GetConsensusConstants { - block: u64, - reply: Reply<ConsensusConstants>, - }, RegisterValidatorNode { block: u64, reply: Reply<RegisterValidatorNodeResponse>, }, + SubmitTransaction { + transaction_def: LayerOneTransactionDef<serde_json::Value>, + reply: Reply<()>, + }, } pub struct ManagerHandle { @@ -208,7 +199,7 @@ impl ManagerHandle { Self { tx_request } } - pub async fn get_active_validator_nodes(&mut self) -> anyhow::Result<Vec<GetActiveValidatorNodesResponse>> { + pub async fn get_active_validator_nodes(&self) -> anyhow::Result<Vec<GetActiveValidatorNodesResponse>> { let (tx, rx) = oneshot::channel(); self.tx_request .send(ManagerRequest::GetActiveValidatorNodes { reply: tx }) @@ -216,23 +207,29 @@ impl ManagerHandle { rx.await? } - pub async fn get_consensus_constants(&mut self, block: u64) -> anyhow::Result<ConsensusConstants> { + pub async fn register_validator_node(&self, block: u64) -> anyhow::Result<RegisterValidatorNodeResponse> { let (tx, rx) = oneshot::channel(); self.tx_request - .send(ManagerRequest::GetConsensusConstants { block, reply: tx }) + .send(ManagerRequest::RegisterValidatorNode { block, reply: tx }) .await?; rx.await? } - pub async fn register_validator_node(&mut self, block: u64) -> anyhow::Result<RegisterValidatorNodeResponse> { + pub async fn submit_transaction( + &self, + transaction_def: LayerOneTransactionDef<serde_json::Value>, + ) -> anyhow::Result<()> { let (tx, rx) = oneshot::channel(); self.tx_request - .send(ManagerRequest::RegisterValidatorNode { block, reply: tx }) + .send(ManagerRequest::SubmitTransaction { + transaction_def, + reply: tx, + }) .await?; rx.await? } - pub async fn get_tip_info(&mut self) -> anyhow::Result<TipStatus> { + pub async fn get_tip_info(&self) -> anyhow::Result<TipStatus> { let (tx, rx) = oneshot::channel(); self.tx_request.send(ManagerRequest::GetTipInfo { reply: tx }).await?; rx.await?
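The ManagerHandle methods above all follow the same shape: send a ManagerRequest over an mpsc channel together with a oneshot reply sender, then await the reply. A minimal self-contained sketch of that request/reply pattern (simplified types and hypothetical names, not code from this PR):

```rust
use tokio::sync::{mpsc, oneshot};

// Simplified stand-in for ManagerRequest: one variant carrying a oneshot reply sender.
enum Request {
    GetTipHeight {
        reply: oneshot::Sender<anyhow::Result<u64>>,
    },
}

// Simplified stand-in for ManagerHandle.
#[derive(Clone)]
struct Handle {
    tx: mpsc::Sender<Request>,
}

impl Handle {
    // Same shape as ManagerHandle::get_tip_info: send the request, await the oneshot reply.
    async fn get_tip_height(&self) -> anyhow::Result<u64> {
        let (tx, rx) = oneshot::channel();
        self.tx.send(Request::GetTipHeight { reply: tx }).await?;
        rx.await?
    }
}

// Simplified stand-in for the ProcessManager request loop.
async fn request_loop(mut rx: mpsc::Receiver<Request>) {
    while let Some(req) = rx.recv().await {
        match req {
            Request::GetTipHeight { reply } => {
                // Ignore the send result in case the caller has gone away.
                drop(reply.send(Ok(42)));
            },
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (tx, rx) = mpsc::channel(1);
    tokio::spawn(request_loop(rx));
    let handle = Handle { tx };
    println!("tip height: {}", handle.get_tip_height().await?);
    Ok(())
}
```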
diff --git a/applications/tari_watcher/src/minotari.rs b/applications/tari_watcher/src/minotari.rs index ed29ac055..b6c00fa5a 100644 --- a/applications/tari_watcher/src/minotari.rs +++ b/applications/tari_watcher/src/minotari.rs @@ -8,9 +8,9 @@ use log::*; use minotari_app_grpc::tari_rpc::{self as grpc, GetActiveValidatorNodesResponse, RegisterValidatorNodeResponse}; use minotari_node_grpc_client::BaseNodeGrpcClient; use minotari_wallet_grpc_client::WalletGrpcClient; -use tari_common::exit_codes::{ExitCode, ExitError}; -use tari_common_types::types::FixedHash; use tari_crypto::tari_utilities::ByteArray; +use tari_dan_common_types::layer_one_transaction::{LayerOnePayloadType, LayerOneTransactionDef}; +use tari_sidechain::EvictionProof; use tonic::transport::Channel; use url::Url; @@ -18,26 +18,18 @@ use crate::helpers::read_registration_file; #[derive(Clone)] pub struct MinotariNodes { - bootstrapped: bool, node_grpc_address: Url, wallet_grpc_address: Url, node_registration_file: PathBuf, current_height: u64, - node: Option<BaseNodeGrpcClient<Channel>>, - wallet: Option<WalletGrpcClient<Channel>>, } #[derive(Debug, Clone)] pub struct TipStatus { block_height: u64, - block_hash: FixedHash, } impl TipStatus { - pub fn hash(&self) -> FixedHash { - self.block_hash - } - pub fn height(&self) -> u64 { self.block_height } @@ -46,55 +38,29 @@ impl MinotariNodes { pub fn new(node_grpc_address: Url, wallet_grpc_address: Url, node_registration_file: PathBuf) -> Self { Self { - bootstrapped: false, node_grpc_address, wallet_grpc_address, node_registration_file, current_height: 0, - node: None, - wallet: None, } } - pub async fn bootstrap(&mut self) -> anyhow::Result<()> { - if self.bootstrapped { - return Ok(()); - } - - self.connect_node().await?; - self.connect_wallet().await?; - self.bootstrapped = true; - Ok(()) - } - - async fn connect_wallet(&mut self) -> anyhow::Result<()> { - log::info!("Connecting to wallet on gRPC {}", self.wallet_grpc_address); + async fn connect_wallet(&self) -> anyhow::Result<WalletGrpcClient<Channel>> { + log::debug!("Connecting to wallet on gRPC {}", self.wallet_grpc_address); let client = WalletGrpcClient::connect(self.wallet_grpc_address.as_str()).await?; - - self.wallet = Some(client); - Ok(()) + Ok(client) } - async fn connect_node(&mut self) -> anyhow::Result<()> { - log::info!("Connecting to base node on gRPC {}", self.node_grpc_address); - let client = BaseNodeGrpcClient::connect(self.node_grpc_address.to_string()) - .await - .map_err(|e| ExitError::new(ExitCode::ConfigError, e))?; - - self.node = Some(client); - - Ok(()) + async fn connect_node(&self) -> anyhow::Result<BaseNodeGrpcClient<Channel>> { + debug!("Connecting to base node on gRPC {}", self.node_grpc_address); + let client = BaseNodeGrpcClient::connect(self.node_grpc_address.to_string()).await?; + Ok(client) } pub async fn get_tip_status(&mut self) -> anyhow::Result<TipStatus> { - if !self.bootstrapped { - bail!("Node client not connected"); - } - let inner = self - .node - .clone() - .unwrap() + .connect_node() + .await? .get_tip_info(grpc::Empty {}) .await? .into_inner(); @@ -107,20 +73,14 @@ impl MinotariNodes { Ok(TipStatus { block_height: metadata.best_block_height, - block_hash: metadata.best_block_hash.try_into().map_err(|_| anyhow!("error"))?, }) } pub async fn get_active_validator_nodes(&self) -> anyhow::Result<Vec<GetActiveValidatorNodesResponse>> { - if !self.bootstrapped { - bail!("Node client not connected"); - } - let height = self.current_height; let mut stream = self - .node - .clone() - .unwrap() + .connect_node() + .await?
.get_active_validator_nodes(grpc::GetActiveValidatorNodesRequest { height, sidechain_id: vec![], @@ -150,11 +110,7 @@ impl MinotariNodes { Ok(vns) } - pub async fn register_validator_node(&self) -> anyhow::Result { - if !self.bootstrapped { - bail!("Node client not connected"); - } - + pub async fn register_validator_node(&mut self) -> anyhow::Result { info!("Preparing to send a VN registration request"); let info = read_registration_file(self.node_registration_file.clone()) @@ -167,9 +123,8 @@ impl MinotariNodes { })?; let sig = info.signature.signature(); let resp = self - .wallet - .clone() - .unwrap() + .connect_wallet() + .await? .register_validator_node(grpc::RegisterValidatorNodeRequest { validator_node_public_key: info.public_key.to_vec(), validator_node_signature: Some(grpc::Signature { @@ -192,19 +147,36 @@ impl MinotariNodes { Ok(resp) } - pub async fn get_consensus_constants(&self, block_height: u64) -> anyhow::Result { - if !self.bootstrapped { - bail!("Node client not connected"); - } - - let constants = self - .node - .clone() - .unwrap() - .get_constants(grpc::BlockHeight { block_height }) - .await? - .into_inner(); + pub async fn submit_transaction( + &mut self, + transaction_def: LayerOneTransactionDef, + ) -> anyhow::Result<()> { + let proof_type = transaction_def.proof_type; + let resp = match proof_type { + LayerOnePayloadType::EvictionProof => { + let proof = serde_json::from_value::(transaction_def.payload)?; + info!( + "Preparing to send an eviction proof transaction to evict {}", + proof.node_to_evict() + ); + let proof_proto = (&proof).into(); + + let resp = self + .connect_wallet() + .await? + .submit_validator_eviction_proof(grpc::SubmitValidatorEvictionProofRequest { + proof: Some(proof_proto), + fee_per_gram: 10, + message: format!("Validator: Automatically submitted {proof_type} transaction"), + sidechain_deployment_key: vec![], + }) + .await?; + resp.into_inner() + }, + }; + + info!("{} transaction sent successfully (tx_id={})", proof_type, resp.tx_id); - Ok(constants) + Ok(()) } } diff --git a/applications/tari_watcher/src/monitoring.rs b/applications/tari_watcher/src/monitoring.rs index dc98b5c82..c50bcfde6 100644 --- a/applications/tari_watcher/src/monitoring.rs +++ b/applications/tari_watcher/src/monitoring.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: BSD-3-Clause use log::*; -use minotari_app_grpc::tari_rpc::RegisterValidatorNodeResponse; use tokio::{ process::Child, sync::mpsc, @@ -21,11 +20,8 @@ pub struct Transaction { } impl Transaction { - pub fn new(response: RegisterValidatorNodeResponse, block: u64) -> Self { - Self { - id: response.transaction_id, - block, - } + pub fn new(id: u64, block: u64) -> Self { + Self { id, block } } } diff --git a/applications/tari_watcher/src/process.rs b/applications/tari_watcher/src/process.rs index 241de05ac..edd91eead 100644 --- a/applications/tari_watcher/src/process.rs +++ b/applications/tari_watcher/src/process.rs @@ -8,7 +8,7 @@ use std::{ use anyhow::bail; use log::*; -use tari_shutdown::Shutdown; +use tari_common::configuration::Network; use tokio::{ fs::{self, OpenOptions}, io::AsyncWriteExt, @@ -62,6 +62,7 @@ async fn spawn_validator_node( binary_path: PathBuf, base_dir: PathBuf, minotari_node_grpc_url: &Url, + network: Network, ) -> anyhow::Result { debug!("Using VN binary at: {}", binary_path.display()); debug!("Using VN base dir in directory: {}", base_dir.display()); @@ -69,6 +70,7 @@ async fn spawn_validator_node( fs::create_dir_all(&base_dir).await?; let child = TokioCommand::new(binary_path) + 
.arg(format!("--network={}", network.as_key_str())) .arg(format!("-b{}", base_dir.display())) .arg(format!("--node-grpc={minotari_node_grpc_url}")) .stdin(Stdio::null()) @@ -87,7 +89,7 @@ pub async fn spawn_validator_node_os( cfg_alert: Channels, auto_restart: bool, minotari_node_grpc_url: Url, - mut trigger_signal: Shutdown, + network: Network, ) -> anyhow::Result { let (tx_log, rx_log) = mpsc::channel(16); let (tx_alert, rx_alert) = mpsc::channel(16); @@ -98,8 +100,13 @@ pub async fn spawn_validator_node_os( let tx_restart_clone_main = tx_restart.clone(); tokio::spawn(async move { loop { - let child_res = - spawn_validator_node(binary_path.clone(), vn_base_dir.clone(), &minotari_node_grpc_url).await; + let child_res = spawn_validator_node( + binary_path.clone(), + vn_base_dir.clone(), + &minotari_node_grpc_url, + network, + ) + .await; match child_res { Ok(child) => { @@ -135,7 +142,6 @@ pub async fn spawn_validator_node_os( Some(_) => { if !auto_restart { info!("Received restart signal, but auto restart is disabled, exiting"); - trigger_signal.trigger(); break; } @@ -201,7 +207,7 @@ pub async fn start_validator( minotari_node_grpc_url: Url, alerting_config: Channels, auto_restart: bool, - trigger_signal: Shutdown, + network: Network, ) -> Option { let opt = check_existing_node_os(vn_base_dir.clone()).await; if let Some(pid) = opt { @@ -218,7 +224,7 @@ pub async fn start_validator( alerting_config, auto_restart, minotari_node_grpc_url, - trigger_signal, + network, ) .await .ok()?; diff --git a/applications/tari_watcher/src/registration.rs b/applications/tari_watcher/src/registration.rs deleted file mode 100644 index 4a7e91818..000000000 --- a/applications/tari_watcher/src/registration.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2024 The Tari Project -// SPDX-License-Identifier: BSD-3-Clause - -use log::*; -use tari_common_types::types::FixedHash; -use tokio::time::{self, Duration, MissedTickBehavior}; - -use crate::{ - config::Config, - helpers::{contains_key, read_registration_file, to_vn_public_keys}, - manager::ManagerHandle, -}; - -// TODO: make configurable -// Amount of time to wait before the watcher runs a check again -const REGISTRATION_LOOP_INTERVAL: Duration = Duration::from_secs(30); - -// Periodically checks that the local node is still registered on the network. -// If it is no longer registered or close to expiry (1 epoch of blocks or less), it will attempt to re-register. -// It will do nothing if it is registered already and not close to expiry. -pub async fn registration_loop(config: Config, mut handle: ManagerHandle) -> anyhow::Result { - let mut interval = time::interval(REGISTRATION_LOOP_INTERVAL); - interval.set_missed_tick_behavior(MissedTickBehavior::Delay); - let mut last_block_hash: Option = None; - let mut recently_registered = false; - - loop { - interval.tick().await; - - let Some(vn_reg_data) = read_registration_file(&config.vn_registration_file).await? 
else { - info!("No registration data found, will try again in 30s"); - continue; - }; - let public_key = vn_reg_data.public_key; - debug!("Local public key: {}", public_key.clone()); - - let tip_info = handle.get_tip_info().await; - if let Err(e) = tip_info { - error!("Failed to get tip info: {}", e); - continue; - } - - let current_block = tip_info.as_ref().unwrap().height(); - if last_block_hash.is_none() || last_block_hash.unwrap() != tip_info.as_ref().unwrap().hash() { - last_block_hash = Some(tip_info.unwrap().hash()); - debug!( - "New block hash at height {}: {}", - current_block, - last_block_hash.unwrap() - ); - } else { - debug!("Same block as previous tick"); - } - - let vn_status = handle.get_active_validator_nodes().await; - if let Err(e) = vn_status { - error!("Failed to get active validators: {}", e); - continue; - } - let active_keys = to_vn_public_keys(vn_status.unwrap()); - info!("Amount of active validator node keys: {}", active_keys.len()); - for key in &active_keys { - info!("{}", key); - } - - let constants = handle.get_consensus_constants(current_block).await; - if let Err(e) = constants { - error!("Failed to get consensus constants: {}", e); - continue; - } - - // if the node is already registered and not close to expiring in the next epoch, skip registration - if contains_key(active_keys.clone(), public_key.clone()) || recently_registered { - info!("VN has an active registration and will not expire in the next epoch, skip"); - recently_registered = false; - continue; - } - - // if we are not currently registered or close to expiring, attempt to register - - info!("VN not active or about to expire, attempting to register.."); - let tx = handle.register_validator_node(current_block).await; - if let Err(e) = tx { - error!("Failed to register VN: {}", e); - continue; - } - let tx = tx.unwrap(); - if !tx.is_success { - error!("Failed to register VN: {}", tx.failure_message); - continue; - } - info!( - "Registered VN at block {} with transaction id: {}", - current_block, tx.transaction_id - ); - - // give the network another tick to process the registration - recently_registered = true; - } -} diff --git a/applications/tari_watcher/src/transaction_worker.rs b/applications/tari_watcher/src/transaction_worker.rs new file mode 100644 index 000000000..3a7e226a9 --- /dev/null +++ b/applications/tari_watcher/src/transaction_worker.rs @@ -0,0 +1,151 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use std::path::PathBuf; + +use anyhow::bail; +use log::*; +use tari_dan_common_types::layer_one_transaction::LayerOneTransactionDef; +use tokio::{ + fs, + time::{self, Duration}, +}; + +use crate::{ + config::Config, + helpers::{read_registration_file, to_vn_public_keys}, + manager::ManagerHandle, +}; + +// TODO: make configurable +// Amount of time to wait before the watcher runs a check again +const REGISTRATION_LOOP_INTERVAL: Duration = Duration::from_secs(30); + +// Periodically checks that the local node is still registered on the network. +// If it is no longer registered or close to expiry (1 epoch of blocks or less), it will attempt to re-register. +// It will do nothing if it is registered already and not close to expiry. 
+pub async fn worker_loop(config: Config, handle: ManagerHandle) -> anyhow::Result { + let mut is_registered = false; + let vn_registration_file = config.get_registration_file(); + let vn_layer_one_transactions = config.get_layer_one_transaction_path(); + + loop { + time::sleep(REGISTRATION_LOOP_INTERVAL).await; + + if !is_registered { + if config.auto_register { + match ensure_registered(&handle, &vn_registration_file).await { + Ok(_) => { + is_registered = true; + }, + Err(e) => { + error!("Unable to ensure validator registration: {}", e); + }, + } + } else { + debug!("Auto registration is disabled, skipping registration check"); + } + } + + check_and_submit_layer_one_transactions(&handle, &vn_layer_one_transactions).await?; + } +} + +async fn ensure_registered(handle: &ManagerHandle, vn_registration_file: &PathBuf) -> anyhow::Result<()> { + let Some(vn_reg_data) = read_registration_file(&vn_registration_file).await? else { + info!("No registration data found, will try again in 30s"); + return Ok(()); + }; + let public_key = vn_reg_data.public_key; + debug!("Local public key: {}", public_key.clone()); + + let tip_info = handle.get_tip_info().await?; + + let current_height = tip_info.height(); + + let vn_status = handle.get_active_validator_nodes().await?; + let active_keys = to_vn_public_keys(vn_status); + info!("Amount of active validator node keys: {}", active_keys.len()); + for key in &active_keys { + info!("{}", key); + } + + // if the node is already registered + if active_keys.iter().any(|vn| *vn == public_key) { + info!("VN has an active registration"); + return Ok(()); + } + + info!("VN not active, attempting to register.."); + let tx = handle.register_validator_node(current_height).await?; + if !tx.is_success { + bail!("Failed to register VN: {}", tx.failure_message); + } + info!( + "Registered VN at block {} with transaction id: {}", + current_height, tx.transaction_id + ); + + Ok(()) +} + +async fn check_and_submit_layer_one_transactions( + handle: &ManagerHandle, + vn_layer_one_transactions: &PathBuf, +) -> anyhow::Result<()> { + let complete_dir = vn_layer_one_transactions.join("complete"); + fs::create_dir_all(&complete_dir).await?; + let failed_dir = vn_layer_one_transactions.join("failed"); + fs::create_dir_all(&failed_dir).await?; + + info!("Checking for layer one transactions to submit.."); + let mut files = fs::read_dir(vn_layer_one_transactions).await?; + while let Some(file) = files.next_entry().await.transpose() { + let file = file?; + if file.path() == complete_dir || file.path() == failed_dir { + continue; + } + if !file.file_type().await?.is_file() { + trace!("Skipping non-file: {}", file.path().display()); + continue; + } + if file.path().extension().map_or(true, |s| s != "json") { + debug!("Skipping non-JSON file: {}", file.path().display()); + continue; + } + + let f = match fs::File::open(file.path()).await { + Ok(f) => f.into_std().await, + Err(e) => { + warn!("Failed to open file: {}", e); + continue; + }, + }; + match serde_json::from_reader::<_, LayerOneTransactionDef>(f) { + Ok(transaction_def) => { + info!("Submitting {} transaction", transaction_def.proof_type); + if let Err(err) = handle.submit_transaction(transaction_def).await { + warn!( + "Failed to submit transaction: {}. Moving to {}", + err, + failed_dir.display() + ); + fs::rename(file.path(), failed_dir.join(file.file_name())).await?; + continue; + } + info!( + "Transaction submitted successfully! 
Moving to complete: {}", + complete_dir.display() + ); + fs::rename(file.path(), complete_dir.join(file.file_name())).await?; + }, + Err(e) => { + warn!("Failed to parse JSON file {}: {}", file.path().display(), e); + fs::rename(file.path(), failed_dir.join(file.file_name())).await?; + continue; + }, + } + } + + Ok(()) +} diff --git a/bindings/dist/types/Command.d.ts b/bindings/dist/types/Command.d.ts index 937760536..1374b8885 100644 --- a/bindings/dist/types/Command.d.ts +++ b/bindings/dist/types/Command.d.ts @@ -1,3 +1,4 @@ +import type { EvictNodeAtom } from "./EvictNodeAtom"; import type { ForeignProposalAtom } from "./ForeignProposalAtom"; import type { MintConfidentialOutputAtom } from "./MintConfidentialOutputAtom"; import type { ResumeNodeAtom } from "./ResumeNodeAtom"; @@ -27,4 +28,6 @@ export type Command = { SuspendNode: SuspendNodeAtom; } | { ResumeNode: ResumeNodeAtom; +} | { + EvictNode: EvictNodeAtom; } | "EndEpoch"; diff --git a/bindings/dist/types/MintConfidentialOutputAtom.d.ts b/bindings/dist/types/MintConfidentialOutputAtom.d.ts index 7ba1166fa..44379f922 100644 --- a/bindings/dist/types/MintConfidentialOutputAtom.d.ts +++ b/bindings/dist/types/MintConfidentialOutputAtom.d.ts @@ -1,4 +1,4 @@ -import type { SubstateId } from "./SubstateId"; +import type { UnclaimedConfidentialOutputAddress } from "./UnclaimedConfidentialOutputAddress"; export interface MintConfidentialOutputAtom { - substate_id: SubstateId; + commitment: UnclaimedConfidentialOutputAddress; } diff --git a/bindings/dist/types/QuorumCertificate.d.ts b/bindings/dist/types/QuorumCertificate.d.ts index 919ad93fc..006657676 100644 --- a/bindings/dist/types/QuorumCertificate.d.ts +++ b/bindings/dist/types/QuorumCertificate.d.ts @@ -6,6 +6,8 @@ import type { ValidatorSignature } from "./ValidatorSignature"; export interface QuorumCertificate { qc_id: string; block_id: string; + header_hash: string; + parent_id: string; block_height: NodeHeight; epoch: Epoch; shard_group: ShardGroup; diff --git a/bindings/dist/types/validator-node-client/GetShardKeyRequest.d.ts b/bindings/dist/types/validator-node-client/GetShardKeyRequest.d.ts index 15a627638..73c61a3ff 100644 --- a/bindings/dist/types/validator-node-client/GetShardKeyRequest.d.ts +++ b/bindings/dist/types/validator-node-client/GetShardKeyRequest.d.ts @@ -1,4 +1,5 @@ +import type { Epoch } from "../Epoch"; export interface GetShardKeyRequest { - height: number; + epoch: Epoch; public_key: string; } diff --git a/bindings/dist/types/validator-node-client/GetShardKeyRequest.js b/bindings/dist/types/validator-node-client/GetShardKeyRequest.js index e5b481d1e..cb0ff5c3b 100644 --- a/bindings/dist/types/validator-node-client/GetShardKeyRequest.js +++ b/bindings/dist/types/validator-node-client/GetShardKeyRequest.js @@ -1,2 +1 @@ -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. export {}; diff --git a/bindings/src/types/Command.ts b/bindings/src/types/Command.ts index 8b2b61b23..9436e5fee 100644 --- a/bindings/src/types/Command.ts +++ b/bindings/src/types/Command.ts @@ -1,4 +1,5 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
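For context on check_and_submit_layer_one_transactions above: the files it scans are JSON-encoded LayerOneTransactionDef values written under the validator node's layer_one_transactions data directory. A rough sketch of writing and re-reading that shape (the struct below is a simplified stand-in for the real type, and the payload is invented for illustration):

```rust
use serde::{Deserialize, Serialize};
use serde_json::json;

// Simplified stand-in mirroring the shape of LayerOneTransactionDef<T>.
#[derive(Debug, Serialize, Deserialize)]
struct TransactionDef<T> {
    proof_type: String,
    payload: T,
}

fn main() -> anyhow::Result<()> {
    // Roughly what a pending file could contain before the watcher picks it up.
    let def = TransactionDef {
        proof_type: "EvictionProof".to_string(),
        payload: json!({ "example": "opaque eviction proof payload" }),
    };
    let encoded = serde_json::to_string_pretty(&def)?;
    println!("{encoded}");

    // The watcher side deserialises the same shape (it uses serde_json::from_reader on the file).
    let decoded: TransactionDef<serde_json::Value> = serde_json::from_str(&encoded)?;
    println!("proof_type = {}", decoded.proof_type);
    Ok(())
}
```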
+import type { EvictNodeAtom } from "./EvictNodeAtom"; import type { ForeignProposalAtom } from "./ForeignProposalAtom"; import type { MintConfidentialOutputAtom } from "./MintConfidentialOutputAtom"; import type { ResumeNodeAtom } from "./ResumeNodeAtom"; @@ -18,4 +19,5 @@ export type Command = | { MintConfidentialOutput: MintConfidentialOutputAtom } | { SuspendNode: SuspendNodeAtom } | { ResumeNode: ResumeNodeAtom } + | { EvictNode: EvictNodeAtom } | "EndEpoch"; diff --git a/bindings/src/types/MintConfidentialOutputAtom.ts b/bindings/src/types/MintConfidentialOutputAtom.ts index 1ac1f564d..ac4c69036 100644 --- a/bindings/src/types/MintConfidentialOutputAtom.ts +++ b/bindings/src/types/MintConfidentialOutputAtom.ts @@ -1,6 +1,6 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -import type { SubstateId } from "./SubstateId"; +import type { UnclaimedConfidentialOutputAddress } from "./UnclaimedConfidentialOutputAddress"; export interface MintConfidentialOutputAtom { - substate_id: SubstateId; + commitment: UnclaimedConfidentialOutputAddress; } diff --git a/bindings/src/types/QuorumCertificate.ts b/bindings/src/types/QuorumCertificate.ts index 122875a06..fad4c4cc4 100644 --- a/bindings/src/types/QuorumCertificate.ts +++ b/bindings/src/types/QuorumCertificate.ts @@ -8,6 +8,8 @@ import type { ValidatorSignature } from "./ValidatorSignature"; export interface QuorumCertificate { qc_id: string; block_id: string; + header_hash: string; + parent_id: string; block_height: NodeHeight; epoch: Epoch; shard_group: ShardGroup; diff --git a/bindings/src/types/validator-node-client/GetShardKeyRequest.ts b/bindings/src/types/validator-node-client/GetShardKeyRequest.ts index 4461c5aed..438386e2a 100644 --- a/bindings/src/types/validator-node-client/GetShardKeyRequest.ts +++ b/bindings/src/types/validator-node-client/GetShardKeyRequest.ts @@ -1,6 +1,7 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { Epoch } from "../Epoch"; export interface GetShardKeyRequest { - height: number; + epoch: Epoch; public_key: string; } diff --git a/clients/base_node_client/src/grpc.rs b/clients/base_node_client/src/grpc.rs index 316a37bd4..c2a2e8240 100644 --- a/clients/base_node_client/src/grpc.rs +++ b/clients/base_node_client/src/grpc.rs @@ -22,20 +22,17 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::convert::TryInto; - use async_trait::async_trait; use log::*; -use minotari_app_grpc::tari_rpc::{ - self as grpc, - GetShardKeyRequest, - GetValidatorNodeChangesRequest, - ValidatorNodeChange, -}; +use minotari_app_grpc::tari_rpc::{self as grpc, GetShardKeyRequest, GetValidatorNodeChangesRequest}; use minotari_node_grpc_client::BaseNodeGrpcClient; use tari_common_types::types::{FixedHash, PublicKey}; -use tari_core::{blocks::BlockHeader, transactions::transaction_components::CodeTemplateRegistration}; -use tari_dan_common_types::SubstateAddress; +use tari_core::{ + base_node::comms_interface::ValidatorNodeChange, + blocks::BlockHeader, + transactions::transaction_components::CodeTemplateRegistration, +}; +use tari_dan_common_types::{Epoch, SubstateAddress}; use tari_utilities::ByteArray; use url::Url; @@ -105,6 +102,14 @@ impl BaseNodeClient for GrpcBaseNodeClient { Ok(()) } + async fn get_network(&mut self) -> Result { + let inner = self.connection().await?; + let request = grpc::Empty {}; + let result = inner.get_version(request).await?.into_inner(); + u8::try_from(result.network) + .map_err(|_| BaseNodeClientError::InvalidPeerMessage(format!("Invalid network byte {}", result.network))) + } + async fn get_tip_info(&mut self) -> Result { let inner = self.connection().await?; let request = grpc::Empty {}; @@ -122,24 +127,27 @@ impl BaseNodeClient for GrpcBaseNodeClient { async fn get_validator_node_changes( &mut self, - start_height: u64, - end_height: u64, + epoch: Epoch, sidechain_id: Option<&PublicKey>, ) -> Result, BaseNodeClientError> { let client = self.connection().await?; let result = client .get_validator_node_changes(GetValidatorNodeChangesRequest { - start_height, - end_height, - sidechain_id: match sidechain_id { - None => vec![], - Some(sidechain_id) => sidechain_id.to_vec(), - }, + epoch: epoch.as_u64(), + sidechain_id: sidechain_id.map(|id| id.as_bytes().to_vec()).unwrap_or_default(), }) .await? .into_inner(); - Ok(result.changes) + let changes = result + .changes + .into_iter() + .map(TryInto::try_into) + .collect::>() + .map_err(|err| { + BaseNodeClientError::InvalidPeerMessage(format!("Error converting validator node changes: {}", err)) + })?; + Ok(changes) } async fn get_validator_nodes(&mut self, height: u64) -> Result, BaseNodeClientError> { @@ -201,23 +209,22 @@ impl BaseNodeClient for GrpcBaseNodeClient { async fn get_shard_key( &mut self, - height: u64, + epoch: Epoch, public_key: &PublicKey, ) -> Result, BaseNodeClientError> { let inner = self.connection().await?; let request = GetShardKeyRequest { - height, + epoch: epoch.as_u64(), public_key: public_key.as_bytes().to_vec(), }; - let result = inner.get_shard_key(request).await?.into_inner(); - if result.shard_key.is_empty() { - Ok(None) - } else { - // The SubstateAddress type has 4 extra bytes for the version, this is disregarded for validator node shard - // key. 
- // TODO: separate type for validator node shard key - let hash = FixedHash::try_from(result.shard_key.as_slice())?; - Ok(Some(SubstateAddress::from_hash_and_version(hash, 0))) + match inner.get_shard_key(request).await { + Ok(result) => { + let result = result.into_inner(); + let hash = FixedHash::try_from(result.shard_key.as_slice())?; + Ok(Some(SubstateAddress::from_hash_and_version(hash, 0))) + }, + Err(status) if matches!(status.code(), tonic::Code::NotFound) => Ok(None), + Err(e) => Err(e.into()), } } diff --git a/clients/base_node_client/src/traits.rs b/clients/base_node_client/src/traits.rs index 020177b5c..4ba182d71 100644 --- a/clients/base_node_client/src/traits.rs +++ b/clients/base_node_client/src/traits.rs @@ -2,10 +2,13 @@ // SPDX-License-Identifier: BSD-3-Clause use async_trait::async_trait; -use minotari_app_grpc::tari_rpc::ValidatorNodeChange; use tari_common_types::types::{FixedHash, PublicKey}; -use tari_core::{blocks::BlockHeader, transactions::transaction_components::CodeTemplateRegistration}; -use tari_dan_common_types::SubstateAddress; +use tari_core::{ + base_node::comms_interface::ValidatorNodeChange, + blocks::BlockHeader, + transactions::transaction_components::CodeTemplateRegistration, +}; +use tari_dan_common_types::{Epoch, SubstateAddress}; use crate::{ error::BaseNodeClientError, @@ -15,17 +18,17 @@ use crate::{ #[async_trait] pub trait BaseNodeClient: Send + Sync + Clone { async fn test_connection(&mut self) -> Result<(), BaseNodeClientError>; + async fn get_network(&mut self) -> Result; async fn get_tip_info(&mut self) -> Result; async fn get_validator_node_changes( &mut self, - start_height: u64, - end_height: u64, + epoch: Epoch, sidechain_id: Option<&PublicKey>, ) -> Result, BaseNodeClientError>; async fn get_validator_nodes(&mut self, height: u64) -> Result, BaseNodeClientError>; async fn get_shard_key( &mut self, - height: u64, + epoch: Epoch, public_key: &PublicKey, ) -> Result, BaseNodeClientError>; async fn get_template_registrations( diff --git a/clients/validator_node_client/src/lib.rs b/clients/validator_node_client/src/lib.rs index b106492b7..dc7a8482c 100644 --- a/clients/validator_node_client/src/lib.rs +++ b/clients/validator_node_client/src/lib.rs @@ -67,6 +67,10 @@ impl ValidatorNodeClient { self.send_request("get_epoch_manager_stats", json!({})).await } + pub async fn get_consensus_status(&mut self) -> Result { + self.send_request("get_consensus_status", json!({})).await + } + pub async fn get_active_templates( &mut self, request: GetTemplatesRequest, diff --git a/clients/validator_node_client/src/types.rs b/clients/validator_node_client/src/types.rs index 4cdbba6bd..12b218729 100644 --- a/clients/validator_node_client/src/types.rs +++ b/clients/validator_node_client/src/types.rs @@ -30,6 +30,7 @@ use tari_dan_common_types::{ committee::{Committee, CommitteeInfo}, shard::Shard, Epoch, + NodeHeight, PeerAddress, SubstateAddress, }; @@ -258,6 +259,59 @@ pub struct GetAllVnsResponse { pub vns: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr( + feature = "ts", + derive(TS), + ts(export, export_to = "../../bindings/src/types/validator-node-client/") +)] +pub struct GetBaseLayerEpochChangesRequest { + pub start_epoch: Epoch, + pub end_epoch: Epoch, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr( + feature = "ts", + derive(TS), + ts(export, export_to = "../../bindings/src/types/validator-node-client/") +)] +pub struct GetBaseLayerEpochChangesResponse { + pub changes: Vec<(Epoch, Vec)>, +} + 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr( + feature = "ts", + derive(TS), + ts(export, export_to = "../../bindings/src/types/validator-node-client/") +)] +pub struct GetConsensusStatusResponse { + pub epoch: Epoch, + pub height: NodeHeight, + pub state: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr( + feature = "ts", + derive(TS), + ts(export, export_to = "../../bindings/src/types/validator-node-client/") +)] +/// Represents a validator node state change +pub enum ValidatorNodeChange { + Add { + #[cfg_attr(feature = "ts", ts(type = "string"))] + public_key: PublicKey, + activation_epoch: Epoch, + minimum_value_promise: u64, + }, + Remove { + #[cfg_attr(feature = "ts", ts(type = "string"))] + public_key: PublicKey, + }, +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[cfg_attr( feature = "ts", @@ -536,8 +590,7 @@ impl From> for ValidatorNode { ts(export, export_to = "../../bindings/src/types/validator-node-client/") )] pub struct GetShardKeyRequest { - #[cfg_attr(feature = "ts", ts(type = "number"))] - pub height: u64, + pub epoch: Epoch, #[cfg_attr(feature = "ts", ts(type = "string"))] pub public_key: PublicKey, } diff --git a/dan_layer/common_types/Cargo.toml b/dan_layer/common_types/Cargo.toml index a7143f771..853ce1cc8 100644 --- a/dan_layer/common_types/Cargo.toml +++ b/dan_layer/common_types/Cargo.toml @@ -22,10 +22,11 @@ libp2p-identity = { workspace = true, features = [ "peerid", ] } -blake2 = { workspace = true } +borsh = { workspace = true } ethnum = { workspace = true } newtype-ops = { workspace = true } rand = { workspace = true } +indexmap = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } serde = { workspace = true, default-features = true } diff --git a/dan_layer/common_types/src/borsh.rs b/dan_layer/common_types/src/borsh.rs new file mode 100644 index 000000000..c42b82e37 --- /dev/null +++ b/dan_layer/common_types/src/borsh.rs @@ -0,0 +1,24 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +// We cannot currently use the "borsh" feature in indexmap with the "derive" feature of borsh as this creates a cyclic +// dependency. +// See https://github.com/bkchr/proc-macro-crate/issues/37#issuecomment-2476386861 for details. 
+pub mod indexmap { + + use borsh::BorshSerialize; + use indexmap::IndexMap; + + pub fn serialize( + obj: &IndexMap, + writer: &mut W, + ) -> Result<(), borsh::io::Error> { + let len = obj.len() as u64; + len.serialize(writer)?; + for (key, value) in obj { + key.serialize(writer)?; + value.serialize(writer)?; + } + Ok(()) + } +} diff --git a/dan_layer/common_types/src/bytes.rs b/dan_layer/common_types/src/bytes.rs index e6e5b374b..3d4ad0557 100644 --- a/dan_layer/common_types/src/bytes.rs +++ b/dan_layer/common_types/src/bytes.rs @@ -22,9 +22,10 @@ use std::{cmp, convert::TryFrom, ops::Deref}; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default, Deserialize, Serialize)] +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default, Deserialize, Serialize, BorshSerialize)] pub struct MaxSizeBytes { inner: Vec, } diff --git a/dan_layer/common_types/src/epoch.rs b/dan_layer/common_types/src/epoch.rs index ca504a6b1..77a26d600 100644 --- a/dan_layer/common_types/src/epoch.rs +++ b/dan_layer/common_types/src/epoch.rs @@ -22,12 +22,13 @@ use std::fmt::Display; +use borsh::BorshSerialize; use newtype_ops::newtype_ops; use serde::{Deserialize, Serialize}; #[cfg(feature = "ts")] use ts_rs::TS; -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub struct Epoch(#[cfg_attr(feature = "ts", ts(type = "number"))] pub u64); diff --git a/dan_layer/common_types/src/extra_data.rs b/dan_layer/common_types/src/extra_data.rs index 19cc36e8a..49751b597 100644 --- a/dan_layer/common_types/src/extra_data.rs +++ b/dan_layer/common_types/src/extra_data.rs @@ -22,6 +22,7 @@ use std::collections::BTreeMap; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; #[cfg(feature = "ts")] use ts_rs::TS; @@ -32,12 +33,13 @@ const MAX_DATA_SIZE: usize = 256; type ExtraFieldValue = MaxSizeBytes; #[repr(u8)] -#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize, BorshSerialize)] +#[borsh(use_discriminant = true)] pub enum ExtraFieldKey { SidechainId = 0x00, } -#[derive(Clone, Debug, Deserialize, Serialize, Default)] +#[derive(Clone, Debug, Deserialize, Serialize, Default, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub struct ExtraData(#[cfg_attr(feature = "ts", ts(type = "string"))] BTreeMap); diff --git a/dan_layer/common_types/src/hasher.rs b/dan_layer/common_types/src/hasher.rs index 43dbc23b9..a0c0d931b 100644 --- a/dan_layer/common_types/src/hasher.rs +++ b/dan_layer/common_types/src/hasher.rs @@ -1,79 +1,84 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::{io, io::Write}; +pub use tari_hashing::layer2::TariConsensusHasher; -use blake2::{ - digest::{consts::U32, Digest}, - Blake2b, -}; -use serde::Serialize; -use tari_bor::encode_into_std_writer; -use tari_common_types::types::FixedHash; -use tari_crypto::hashing::DomainSeparation; - -/// Create a new `TariHasher` using a given domain-separated hasher and label. 
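A note on the indexmap/borsh workaround in borsh.rs above: a helper like this is typically wired into a struct through borsh-derive's serialize_with field attribute. A sketch under that assumption (the attribute usage and the Example type are illustrative, based on borsh 1.x, and not taken from this diff):

```rust
use borsh::BorshSerialize;
use indexmap::IndexMap;

// Assumed local copy of the helper from dan_layer/common_types/src/borsh.rs.
mod indexmap_borsh {
    use borsh::BorshSerialize;
    use indexmap::IndexMap;

    pub fn serialize<K: BorshSerialize, V: BorshSerialize, W: borsh::io::Write>(
        obj: &IndexMap<K, V>,
        writer: &mut W,
    ) -> Result<(), borsh::io::Error> {
        // Length prefix, then each key/value pair in the map's iteration order.
        (obj.len() as u64).serialize(writer)?;
        for (key, value) in obj {
            key.serialize(writer)?;
            value.serialize(writer)?;
        }
        Ok(())
    }
}

// Hypothetical struct showing how the helper could be attached to an IndexMap field.
#[derive(BorshSerialize)]
struct Example {
    #[borsh(serialize_with = "indexmap_borsh::serialize")]
    values: IndexMap<String, u64>,
}

fn main() -> Result<(), borsh::io::Error> {
    let mut values = IndexMap::new();
    values.insert("answer".to_string(), 42u64);
    let bytes = borsh::to_vec(&Example { values })?;
    println!("serialized {} bytes", bytes.len());
    Ok(())
}
```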
-/// This is just a wrapper, -pub fn tari_hasher(label: &'static str) -> TariHasher { - TariHasher::new_with_label::(label) -} - -/// A domain-separated hasher that uses CBOR internally to ensure hashing is canonical. -/// -/// The hasher produces 32 bytes of output using the `Blake2b` hash function. -/// -/// This assumes that any input type supports `Serialize` canonically; that is, two different values of the same type -/// must serialize distinctly. -#[derive(Debug, Clone)] -pub struct TariHasher { - hasher: Blake2b, -} - -impl TariHasher { - pub fn new_with_label(label: &'static str) -> Self { - let mut hasher = Blake2b::::new(); - D::add_domain_separation_tag(&mut hasher, label); - Self { hasher } - } - - pub fn update(&mut self, data: &T) { - // Update the hasher using the CBOR encoding of the input, which is assumed to be canonical. - // - // Binary encoding does not make any contract to say that if the writer is infallible (as it is here) then - // encoding in infallible. However this should be the case. Since it is very unergonomic to return an - // error in hash chain functions, and therefore all usages of the hasher, we assume all types implement - // infallible encoding. - encode_into_std_writer(data, &mut self.hash_writer()).expect("encoding failed") - } - - pub fn chain(mut self, data: &T) -> Self { - self.update(data); - self - } - - pub fn digest(self, data: &T) -> FixedHash { - self.chain(data).result() - } - - pub fn result(self) -> FixedHash { - self.finalize_into_array().into() - } - - pub fn finalize_into_array(self) -> [u8; 32] { - self.hasher.finalize().into() - } - - fn hash_writer(&mut self) -> impl Write + '_ { - struct HashWriter<'a>(&'a mut Blake2b); - impl Write for HashWriter<'_> { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.update(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - HashWriter(&mut self.hasher) - } -} +// use std::{io, io::Write}; +// +// use blake2::{ +// digest::{consts::U32, Digest}, +// Blake2b, +// }; +// use serde::Serialize; +// use tari_bor::encode_into_std_writer; +// use tari_common_types::types::FixedHash; +// use tari_crypto::hashing::DomainSeparation; +// use tari_hashing::DomainSeparatedBorshHasher; +// +// /// Create a new `TariHasher` using a given domain-separated hasher and label. +// /// This is just a wrapper, +// pub fn tari_hasher(label: &'static str) -> TariDomainHasher { +// TariDomainHasher::new_with_label(label) +// } +// +// pub type TariDomainHasher = DomainSeparatedBorshHasher, M>; +// +// /// A domain-separated hasher that uses CBOR internally to ensure hashing is canonical. +// /// +// /// The hasher produces 32 bytes of output using the `Blake2b` hash function. +// /// +// /// This assumes that any input type supports `Serialize` canonically; that is, two different values of the same type +// /// must serialize distinctly. +// #[derive(Debug, Clone)] +// pub struct TariHasher { +// hasher: Blake2b, +// } +// +// impl TariHasher { +// pub fn new_with_label(label: &'static str) -> Self { +// let mut hasher = Blake2b::::new(); +// D::add_domain_separation_tag(&mut hasher, label); +// Self { hasher } +// } +// +// pub fn update(&mut self, data: &T) { +// // Update the hasher using the CBOR encoding of the input, which is assumed to be canonical. +// // +// // Binary encoding does not make any contract to say that if the writer is infallible (as it is here) then +// // encoding in infallible. However this should be the case. 
Since it is very unergonomic to return an +// // error in hash chain functions, and therefore all usages of the hasher, we assume all types implement +// // infallible encoding. +// encode_into_std_writer(data, &mut self.hash_writer()).expect("encoding failed") +// } +// +// pub fn chain(mut self, data: &T) -> Self { +// self.update(data); +// self +// } +// +// pub fn digest(self, data: &T) -> FixedHash { +// self.chain(data).result() +// } +// +// pub fn result(self) -> FixedHash { +// self.finalize_into_array().into() +// } +// +// pub fn finalize_into_array(self) -> [u8; 32] { +// self.hasher.finalize().into() +// } +// +// fn hash_writer(&mut self) -> impl Write + '_ { +// struct HashWriter<'a>(&'a mut Blake2b); +// impl Write for HashWriter<'_> { +// fn write(&mut self, buf: &[u8]) -> io::Result { +// self.0.update(buf); +// Ok(buf.len()) +// } +// +// fn flush(&mut self) -> io::Result<()> { +// Ok(()) +// } +// } +// HashWriter(&mut self.hasher) +// } +// } diff --git a/dan_layer/common_types/src/hashing.rs b/dan_layer/common_types/src/hashing.rs index 6a78dbd19..1fa2f5994 100644 --- a/dan_layer/common_types/src/hashing.rs +++ b/dan_layer/common_types/src/hashing.rs @@ -22,45 +22,64 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use blake2::{digest::consts::U32, Blake2b}; -use tari_crypto::{hash_domain, hashing::DomainSeparatedHasher}; -use tari_hashing::ValidatorNodeMerkleHashDomain; +use tari_hashing::layer2::ValidatorNodeBmtHasherBlake2b; +pub use tari_hashing::layer2::{ + block_hasher, + command_hasher, + extra_data_hasher, + foreign_indexes_hasher, + quorum_certificate_hasher, + vote_signature_hasher, +}; use tari_mmr::{BalancedBinaryMerkleProof, BalancedBinaryMerkleTree, MergedBalancedBinaryMerkleProof}; -use crate::hasher::{tari_hasher, TariHasher}; - -hash_domain!(TariDanConsensusHashDomain, "com.tari.dan.consensus", 0); - -pub fn block_hasher() -> TariHasher { - dan_hasher("Block") -} - -pub fn command_hasher() -> TariHasher { - dan_hasher("Command") -} - -pub fn quorum_certificate_hasher() -> TariHasher { - dan_hasher("QuorumCertificate") -} - -pub fn pledge_hasher() -> TariHasher { - dan_hasher("Pledges") -} - -pub fn vote_hasher() -> TariHasher { - dan_hasher("Vote") -} - -pub fn vote_signature_hasher() -> TariHasher { - dan_hasher("VoteSignature") -} - -fn dan_hasher(label: &'static str) -> TariHasher { - tari_hasher::(label) -} - -pub type ValidatorNodeBmtHasherBlake2b = DomainSeparatedHasher, ValidatorNodeMerkleHashDomain>; +// use blake2::{digest::consts::U32, Blake2b}; +// use tari_crypto::{hash_domain, hashing::DomainSeparatedHasher}; +// use tari_hashing::ValidatorNodeMerkleHashDomain; +// use tari_mmr::{BalancedBinaryMerkleProof, BalancedBinaryMerkleTree, MergedBalancedBinaryMerkleProof}; +// +// use crate::hasher::{tari_hasher, TariDomainHasher}; +// +// hash_domain!(TariDanConsensusHashDomain, "com.tari.dan.consensus", 0); +// +// pub type TariHasher = TariDomainHasher; +// +// pub fn block_hasher() -> TariHasher { +// dan_hasher("Block") +// } +// pub fn extra_data_hasher() -> TariHasher { +// dan_hasher("ExtraData") +// } +// +// pub fn foreign_indexes_hasher() -> TariHasher { +// dan_hasher("ForeignIndexes") +// } +// +// pub fn command_hasher() -> TariHasher { +// dan_hasher("Command") +// } 
+// +// pub fn quorum_certificate_hasher() -> TariHasher { +// dan_hasher("QuorumCertificate") +// } +// +// pub fn pledge_hasher() -> TariHasher { +// dan_hasher("Pledges") +// } +// +// pub fn vote_hasher() -> TariHasher { +// dan_hasher("Vote") +// } +// +// pub fn vote_signature_hasher() -> TariHasher { +// dan_hasher("VoteSignature") +// } +// +// fn dan_hasher(label: &'static str) -> TariHasher { +// tari_hasher::(label) +// } +// +// pub type ValidatorNodeBmtHasherBlake2b = DomainSeparatedHasher, ValidatorNodeMerkleHashDomain>; pub type ValidatorNodeBalancedMerkleTree = BalancedBinaryMerkleTree; pub type ValidatorNodeMerkleProof = BalancedBinaryMerkleProof; pub type MergedValidatorNodeMerkleProof = MergedBalancedBinaryMerkleProof; diff --git a/dan_layer/common_types/src/layer_one_transaction.rs b/dan_layer/common_types/src/layer_one_transaction.rs new file mode 100644 index 000000000..be287114e --- /dev/null +++ b/dan_layer/common_types/src/layer_one_transaction.rs @@ -0,0 +1,25 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LayerOneTransactionDef { + pub proof_type: LayerOnePayloadType, + pub payload: T, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub enum LayerOnePayloadType { + EvictionProof, +} + +impl Display for LayerOnePayloadType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LayerOnePayloadType::EvictionProof => write!(f, "EvictionProof"), + } + } +} diff --git a/dan_layer/common_types/src/lib.rs b/dan_layer/common_types/src/lib.rs index e99c066a8..6f4fa7002 100644 --- a/dan_layer/common_types/src/lib.rs +++ b/dan_layer/common_types/src/lib.rs @@ -18,6 +18,7 @@ pub use extra_data::{ExtraData, ExtraFieldKey}; pub mod committee; pub mod hasher; pub mod hashing; +pub mod option; pub mod optional; mod node_height; @@ -51,6 +52,8 @@ mod versioned_substate_id; pub use versioned_substate_id::*; +pub mod borsh; mod lock_intent; pub use lock_intent::*; +pub mod layer_one_transaction; diff --git a/dan_layer/common_types/src/lock_intent.rs b/dan_layer/common_types/src/lock_intent.rs index 47aeb579b..ad302cb75 100644 --- a/dan_layer/common_types/src/lock_intent.rs +++ b/dan_layer/common_types/src/lock_intent.rs @@ -3,6 +3,7 @@ use std::{fmt, str::FromStr}; +use borsh::BorshSerialize; use tari_bor::{Deserialize, Serialize}; use tari_engine_types::substate::SubstateId; @@ -26,7 +27,7 @@ impl ToSubstateAddress for T { } /// Substate lock flags -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), diff --git a/dan_layer/common_types/src/node_height.rs b/dan_layer/common_types/src/node_height.rs index c44a59f24..262f160ab 100644 --- a/dan_layer/common_types/src/node_height.rs +++ b/dan_layer/common_types/src/node_height.rs @@ -6,12 +6,14 @@ use std::{ ops::{Add, AddAssign, Sub}, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; #[cfg(feature = "ts")] use ts_rs::TS; -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[serde(transparent)] 
pub struct NodeHeight(#[cfg_attr(feature = "ts", ts(type = "number"))] pub u64); impl NodeHeight {
diff --git a/dan_layer/common_types/src/option.rs b/dan_layer/common_types/src/option.rs
new file mode 100644
index 000000000..12cbfaaf5
--- /dev/null
+++ b/dan_layer/common_types/src/option.rs
@@ -0,0 +1,133 @@
+// Copyright 2024 The Tari Project
+// SPDX-License-Identifier: BSD-3-Clause
+
+use std::{
+    collections::{BTreeSet, HashSet},
+    fmt,
+    fmt::{Debug, Display},
+};
+
+/// Implements a method that returns an allocation-free Display impl for container types such as Option, Vec, [T],
+/// HashSet, BTreeSet.
+///
+/// # Example
+/// ```rust
+/// use tari_dan_common_types::option::DisplayContainer;
+///
+/// let some_value = Some(42);
+/// let none_value: Option<i32> = None;
+///
+/// // The usual way to do this is verbose and has a heap allocation
+/// let _bad = println!(
+///     "answer: {}",
+///     some_value
+///         .as_ref()
+///         // Heap allocation
+///         .map(|v| v.to_string())
+///         .unwrap_or_else(|| "None".to_string())
+/// );
+///
+/// assert_eq!(format!("answer: {}", some_value.display()), "answer: 42");
+/// assert_eq!(format!("answer: {}", none_value.display()), "answer: None");
+/// assert_eq!(
+///     format!("list: {:.2}", vec![1.01f32, 2f32, 3f32].display()),
+///     "list: 1.01, 2.00, 3.00"
+/// );
+/// ```
+pub trait DisplayContainer {
+    type Item: ?Sized;
+    fn display(&self) -> DisplayCont<&'_ Self::Item>;
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct DisplayCont<T> {
+    value: T,
+}
+
+impl<T: Display> Display for DisplayCont<&'_ Option<T>> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.value {
+            Some(value) => Display::fmt(value, f),
+            None => write!(f, "None"),
+        }
+    }
+}
+
+impl<T: Display> Display for DisplayCont<&'_ [T]> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let len = self.value.len();
+        for (i, item) in self.value.iter().enumerate() {
+            Display::fmt(item, f)?;
+            if i < len - 1 {
+                write!(f, ", ")?;
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<T: Display> Display for DisplayCont<&'_ HashSet<T>> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let len = self.value.len();
+        for (i, item) in self.value.iter().enumerate() {
+            Display::fmt(item, f)?;
+            if i < len - 1 {
+                write!(f, ", ")?;
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<T: Display> Display for DisplayCont<&'_ BTreeSet<T>> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let len = self.value.len();
+        for (i, item) in self.value.iter().enumerate() {
+            Display::fmt(item, f)?;
+            if i < len - 1 {
+                write!(f, ", ")?;
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<T: Display> DisplayContainer for Option<T> {
+    type Item = Self;
+
+    fn display(&self) -> DisplayCont<&'_ Self> {
+        DisplayCont { value: self }
+    }
+}
+
+impl<T: Display> DisplayContainer for [T] {
+    type Item = Self;
+
+    fn display(&self) -> DisplayCont<&'_ Self> {
+        DisplayCont { value: self }
+    }
+}
+
+impl<T: Display> DisplayContainer for Vec<T> {
+    type Item = [T];
+
+    fn display(&self) -> DisplayCont<&'_ [T]> {
+        (*self.as_slice()).display()
+    }
+}
+
+impl<T: Display> DisplayContainer for HashSet<T> {
+    type Item = Self;
+
+    fn display(&self) -> DisplayCont<&'_ Self> {
+        DisplayCont { value: self }
+    }
+}
+
+impl<T: Display> DisplayContainer for BTreeSet<T> {
+    type Item = Self;
+
+    fn display(&self) -> DisplayCont<&'_ Self> {
+        DisplayCont { value: self }
+    }
+}
diff --git a/dan_layer/common_types/src/shard.rs b/dan_layer/common_types/src/shard.rs
index 63bba9167..c66864771 100644
--- a/dan_layer/common_types/src/shard.rs
+++ b/dan_layer/common_types/src/shard.rs
@@ -3,17 +3,18 @@ use std::{fmt::Display, ops::RangeInclusive};
+use 
borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use crate::{uint::U256, NumPreshards, SubstateAddress}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[serde(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), ts(export, export_to = "../../bindings/src/types/") )] +#[serde(transparent)] pub struct Shard(#[cfg_attr(feature = "ts", ts(type = "number"))] u32); impl Shard { diff --git a/dan_layer/common_types/src/shard_group.rs b/dan_layer/common_types/src/shard_group.rs index e1371a0ec..a4cecc3d7 100644 --- a/dan_layer/common_types/src/shard_group.rs +++ b/dan_layer/common_types/src/shard_group.rs @@ -7,11 +7,12 @@ use std::{ ops::RangeInclusive, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use crate::{shard::Shard, uint::U256, NumPreshards, SubstateAddress}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), @@ -91,6 +92,10 @@ impl ShardGroup { self.as_range().contains(shard) } + pub fn overlaps_shard_group(&self, other: &ShardGroup) -> bool { + self.start <= other.end_inclusive && self.end_inclusive >= other.start + } + pub fn as_range(&self) -> RangeInclusive { self.start..=self.end_inclusive } diff --git a/dan_layer/common_types/src/substate_address.rs b/dan_layer/common_types/src/substate_address.rs index 650f42b4b..6a780b57a 100644 --- a/dan_layer/common_types/src/substate_address.rs +++ b/dan_layer/common_types/src/substate_address.rs @@ -9,6 +9,7 @@ use std::{ str::FromStr, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use tari_common_types::types::{FixedHash, FixedHashSizeError}; use tari_crypto::tari_utilities::{ @@ -24,7 +25,7 @@ pub trait ToSubstateAddress { fn to_substate_address(&self) -> SubstateAddress; } -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), diff --git a/dan_layer/common_types/src/versioned_substate_id.rs b/dan_layer/common_types/src/versioned_substate_id.rs index 702a030f4..20fcdd3f3 100644 --- a/dan_layer/common_types/src/versioned_substate_id.rs +++ b/dan_layer/common_types/src/versioned_substate_id.rs @@ -172,8 +172,11 @@ pub struct VersionedSubstateId { } impl VersionedSubstateId { - pub fn new(substate_id: SubstateId, version: u32) -> Self { - Self { substate_id, version } + pub fn new>(substate_id: T, version: u32) -> Self { + Self { + substate_id: substate_id.into(), + version, + } } pub fn substate_id(&self) -> &SubstateId { diff --git a/dan_layer/consensus/Cargo.toml b/dan_layer/consensus/Cargo.toml index 57337877e..fe18f8a55 100644 --- a/dan_layer/consensus/Cargo.toml +++ b/dan_layer/consensus/Cargo.toml @@ -15,6 +15,7 @@ tari_engine_types = { workspace = true } tari_transaction = { workspace = true } tari_epoch_manager = { workspace = true } tari_state_tree = { workspace = true } +tari_sidechain = { workspace = true } # Used for PublicKey and Signature and Network enum tari_common = { workspace = true } @@ -27,4 +28,7 @@ async-trait = { workspace = true } log = { workspace = true } serde = { workspace = true, default-features = true } thiserror = { workspace = true } -tokio = { 
workspace = true, default-features = false, features = ["sync"] } +tokio = { workspace = true, default-features = false, features = ["sync", "time", "macros", "rt"] } + +# REMOvE +serde_json = { workspace = true } diff --git a/dan_layer/consensus/src/block_validations.rs b/dan_layer/consensus/src/block_validations.rs index 8d31c4ed9..51403db1e 100644 --- a/dan_layer/consensus/src/block_validations.rs +++ b/dan_layer/consensus/src/block_validations.rs @@ -1,11 +1,13 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use log::{debug, warn}; use tari_common::configuration::Network; use tari_crypto::{ristretto::RistrettoPublicKey, tari_utilities::ByteArray}; use tari_dan_common_types::{ committee::{Committee, CommitteeInfo}, DerivableFromPublicKey, + Epoch, ExtraFieldKey, }; use tari_dan_storage::consensus_models::Block; @@ -16,6 +18,29 @@ use crate::{ traits::{ConsensusSpec, LeaderStrategy, VoteSignatureService}, }; +const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::block_validations"; +pub fn check_local_proposal( + current_epoch: Epoch, + block: &Block, + committee_info: &CommitteeInfo, + committee_for_block: &Committee, + vote_signing_service: &TConsensusSpec::SignatureService, + leader_strategy: &TConsensusSpec::LeaderStrategy, + config: &HotstuffConfig, +) -> Result<(), HotStuffError> { + check_proposal::( + block, + committee_info, + committee_for_block, + vote_signing_service, + leader_strategy, + config, + )?; + // This proposal is valid, if it is for an epoch ahead of us, we need to sync + check_current_epoch(block, current_epoch)?; + Ok(()) +} + pub fn check_proposal( block: &Block, committee_info: &CommitteeInfo, @@ -27,19 +52,39 @@ pub fn check_proposal( // TODO: in order to do the base layer block has validation, we need to ensure that we have synced to the tip. // If not, we need some strategy for "parking" the blocks until we are at least at the provided hash or the // tip. Without this, the check has a race condition between the base layer scanner and consensus. + // A simpler suggestion is to use the BL epoch block which does not change within epochs // check_base_layer_block_hash::(block, epoch_manager, config).await?; check_network(block, config.network)?; + if block.is_genesis() { + return Err(ProposalValidationError::ProposingGenesisBlock { + proposed_by: block.proposed_by().to_string(), + hash: *block.id(), + } + .into()); + } check_sidechain_id(block, config)?; if block.is_dummy() { check_dummy(block)?; } - check_hash_and_height(block)?; check_proposed_by_leader(leader_strategy, committee_for_block, block)?; check_signature(block)?; check_quorum_certificate::(block, committee_for_block, committee_info, vote_signing_service)?; Ok(()) } +pub fn check_current_epoch(candidate_block: &Block, current_epoch: Epoch) -> Result<(), ProposalValidationError> { + if candidate_block.epoch() > current_epoch { + warn!(target: LOG_TARGET, "⚠️ Proposal for future epoch {} received. 
Current epoch is {}", candidate_block.epoch(), current_epoch); + return Err(ProposalValidationError::FutureEpoch { + block_id: *candidate_block.id(), + current_epoch, + block_epoch: candidate_block.epoch(), + }); + } + + Ok(()) +} + pub fn check_dummy(candidate_block: &Block) -> Result<(), ProposalValidationError> { if candidate_block.signature().is_some() { return Err(ProposalValidationError::DummyBlockWithSignature { @@ -113,26 +158,6 @@ pub async fn check_base_layer_block_hash( Ok(()) } -pub fn check_hash_and_height(candidate_block: &Block) -> Result<(), ProposalValidationError> { - if candidate_block.is_genesis() { - return Err(ProposalValidationError::ProposingGenesisBlock { - proposed_by: candidate_block.proposed_by().to_string(), - hash: *candidate_block.id(), - }); - } - - let calculated_hash = candidate_block.calculate_hash().into(); - if calculated_hash != *candidate_block.id() { - return Err(ProposalValidationError::BlockIdMismatch { - proposed_by: candidate_block.proposed_by().to_string(), - block_id: *candidate_block.id(), - calculated_hash, - }); - } - - Ok(()) -} - pub fn check_proposed_by_leader>( leader_strategy: &TLeaderStrategy, local_committee: &Committee, @@ -164,6 +189,13 @@ pub fn check_signature(candidate_block: &Block) -> Result<(), ProposalValidation block_id: *candidate_block.id(), height: candidate_block.height(), })?; + debug!( + target: LOG_TARGET, + "Validating signature block_id={}, P={}, R={}", + candidate_block.id(), + candidate_block.proposed_by(), + validator_signature.get_public_nonce(), + ); if !validator_signature.verify(candidate_block.proposed_by(), candidate_block.id()) { return Err(ProposalValidationError::InvalidSignature { block_id: *candidate_block.id(), diff --git a/dan_layer/consensus/src/consensus_constants.rs b/dan_layer/consensus/src/consensus_constants.rs index 2bba87c46..69d727077 100644 --- a/dan_layer/consensus/src/consensus_constants.rs +++ b/dan_layer/consensus/src/consensus_constants.rs @@ -60,7 +60,7 @@ impl ConsensusConstants { num_preshards: NumPreshards::P256, pacemaker_block_time: Duration::from_secs(10), missed_proposal_suspend_threshold: 5, - missed_proposal_evict_threshold: 5, + missed_proposal_evict_threshold: 10, missed_proposal_recovery_threshold: 5, max_block_size: 500, fee_exhaust_divisor: 20, // 5% diff --git a/dan_layer/consensus/src/hotstuff/block_change_set.rs b/dan_layer/consensus/src/hotstuff/block_change_set.rs index 0b473e130..1ab74c8a4 100644 --- a/dan_layer/consensus/src/hotstuff/block_change_set.rs +++ b/dan_layer/consensus/src/hotstuff/block_change_set.rs @@ -10,7 +10,7 @@ use std::{ use indexmap::IndexMap; use log::*; use tari_common_types::types::PublicKey; -use tari_dan_common_types::{optional::Optional, shard::Shard, Epoch, ShardGroup}; +use tari_dan_common_types::{option::DisplayContainer, optional::Optional, shard::Shard, Epoch, ShardGroup}; use tari_dan_storage::{ consensus_models::{ Block, @@ -42,7 +42,7 @@ use tari_dan_storage::{ StateStoreWriteTransaction, StorageError, }; -use tari_engine_types::substate::SubstateId; +use tari_engine_types::{substate::SubstateId, template_models::UnclaimedConfidentialOutputAddress}; use tari_transaction::TransactionId; use crate::tracing::TraceTimer; @@ -55,7 +55,6 @@ const MEM_MAX_SUBSTATE_LOCK_SIZE: usize = 100000; const MEM_MAX_TRANSACTION_CHANGE_SIZE: usize = 1000; const MEM_MAX_PROPOSED_FOREIGN_PROPOSALS_SIZE: usize = 1000; const MEM_MAX_PROPOSED_UTXO_MINTS_SIZE: usize = 1000; -const MEM_MAX_SUSPEND_CHANGE_SIZE: usize = 10; #[derive(Debug, Clone)] pub struct 
BlockDecision { @@ -66,6 +65,7 @@ pub struct BlockDecision { pub finalized_transactions: Vec>, pub end_of_epoch: Option, pub high_qc: HighQc, + pub committed_blocks_with_evictions: Vec, } impl BlockDecision { @@ -78,15 +78,14 @@ impl BlockDecision { pub struct ProposedBlockChangeSet { block: LeafBlock, quorum_decision: Option, - block_diff: Vec, + substate_changes: Vec, state_tree_diffs: IndexMap, substate_locks: IndexMap>, transaction_changes: IndexMap, proposed_foreign_proposals: Vec, - proposed_utxo_mints: Vec, + proposed_utxo_mints: Vec, no_vote_reason: Option, - suspend_nodes: Vec, - resume_nodes: Vec, + evict_nodes: Vec, } impl ProposedBlockChangeSet { @@ -94,15 +93,14 @@ impl ProposedBlockChangeSet { Self { block, quorum_decision: None, - block_diff: Vec::new(), + substate_changes: Vec::new(), substate_locks: IndexMap::new(), transaction_changes: IndexMap::new(), state_tree_diffs: IndexMap::new(), proposed_foreign_proposals: Vec::new(), proposed_utxo_mints: Vec::new(), no_vote_reason: None, - suspend_nodes: Vec::new(), - resume_nodes: Vec::new(), + evict_nodes: Vec::new(), } } @@ -112,7 +110,6 @@ impl ProposedBlockChangeSet { } pub fn no_vote(&mut self, no_vote_reason: NoVoteReason) -> &mut Self { - self.clear(); self.no_vote_reason = Some(no_vote_reason); self } @@ -120,15 +117,15 @@ impl ProposedBlockChangeSet { pub fn clear(&mut self) { self.quorum_decision = None; - self.block_diff.clear(); - if self.block_diff.capacity() > MEM_MAX_BLOCK_DIFF_CHANGES { + self.substate_changes.clear(); + if self.substate_changes.capacity() > MEM_MAX_BLOCK_DIFF_CHANGES { debug!( target: LOG_TARGET, "Shrinking block_diff from {} to {}", - self.block_diff.capacity(), + self.substate_changes.capacity(), MEM_MAX_BLOCK_DIFF_CHANGES ); - self.block_diff.shrink_to(MEM_MAX_BLOCK_DIFF_CHANGES); + self.substate_changes.shrink_to(MEM_MAX_BLOCK_DIFF_CHANGES); } self.transaction_changes.clear(); if self.transaction_changes.capacity() > MEM_MAX_TRANSACTION_CHANGE_SIZE { @@ -181,10 +178,8 @@ impl ProposedBlockChangeSet { ); self.proposed_utxo_mints.shrink_to(MEM_MAX_PROPOSED_UTXO_MINTS_SIZE); } - self.suspend_nodes.clear(); - if self.suspend_nodes.capacity() > MEM_MAX_SUSPEND_CHANGE_SIZE { - self.suspend_nodes.shrink_to(MEM_MAX_SUSPEND_CHANGE_SIZE); - } + // evict_nodes is typically rare, so rather release all memory + self.evict_nodes = vec![]; self.no_vote_reason = None; } @@ -198,8 +193,8 @@ impl ProposedBlockChangeSet { self } - pub fn set_block_diff(&mut self, diff: Vec) -> &mut Self { - self.block_diff = diff; + pub fn set_substate_changes(&mut self, diff: Vec) -> &mut Self { + self.substate_changes = diff; self } @@ -217,7 +212,7 @@ impl ProposedBlockChangeSet { &self.proposed_foreign_proposals } - pub fn set_utxo_mint_proposed_in(&mut self, mint: SubstateId) -> &mut Self { + pub fn set_utxo_mint_proposed_in(&mut self, mint: UnclaimedConfidentialOutputAddress) -> &mut Self { self.proposed_utxo_mints.push(mint); self } @@ -228,14 +223,13 @@ impl ProposedBlockChangeSet { } } - pub fn add_suspend_node(&mut self, public_key: PublicKey) -> &mut Self { - self.suspend_nodes.push(public_key); + pub fn add_evict_node(&mut self, public_key: PublicKey) -> &mut Self { + self.evict_nodes.push(public_key); self } - pub fn add_resume_node(&mut self, public_key: PublicKey) -> &mut Self { - self.resume_nodes.push(public_key); - self + pub fn num_evicted_nodes_this_block(&self) -> usize { + self.evict_nodes.len() } #[allow(clippy::mutable_key_type)] @@ -345,7 +339,7 @@ impl ProposedBlockChangeSet { let _timer = 
TraceTimer::debug(LOG_TARGET, "ProposedBlockChangeSet::save"); // Store the block diff - BlockDiff::insert_record(tx, &self.block.block_id, &self.block_diff)?; + BlockDiff::insert_record(tx, &self.block.block_id, &self.substate_changes)?; // Store the tree diffs for each effected shard for (shard, diff) in &self.state_tree_diffs { @@ -391,18 +385,65 @@ impl ProposedBlockChangeSet { } for mint in &self.proposed_utxo_mints { - BurntUtxo::set_proposed_in_block(tx, mint, &self.block.block_id)? + BurntUtxo::set_proposed_in_block(tx, mint, &self.block.block_id)?; } - for node in &self.suspend_nodes { - ValidatorConsensusStats::suspend_node(tx, node, self.block.block_id)? + for node in &self.evict_nodes { + ValidatorConsensusStats::evict_node(tx, node, self.block.block_id)?; } - for node in &self.resume_nodes { - ValidatorConsensusStats::resume_node(tx, node, self.block.block_id)? + Ok(()) + } + + pub fn log_everything(&self) { + const LOG_TARGET: &str = "tari::dan::consensus::block_change_set::debug"; + debug!(target: LOG_TARGET, "❌ No vote: {}", self.no_vote_reason.display()); + let _timer = TraceTimer::debug(LOG_TARGET, "ProposedBlockChangeSet::save_for_debug"); + // TODO: consider persisting this data somewhere + + for change in &self.substate_changes { + debug!(target: LOG_TARGET, "[drop] SubstateChange: {}", change); } - Ok(()) + // Store the tree diffs for each effected shard + for (shard, diff) in &self.state_tree_diffs { + debug!(target: LOG_TARGET, "[drop] StateTreeDiff: shard: {}, diff: {}", shard, diff); + } + + for (substate_id, locks) in &self.substate_locks { + debug!(target: LOG_TARGET, "[drop] SubstateLock: {substate_id}"); + for lock in locks { + debug!(target: LOG_TARGET, " - {lock}"); + } + } + + for (transaction_id, change) in &self.transaction_changes { + debug!(target: LOG_TARGET, "[drop] TransactionChange: {transaction_id}"); + if let Some(ref execution) = change.execution { + debug!(target: LOG_TARGET, " - {execution}"); + } + if let Some(ref update) = change.next_update { + debug!(target: LOG_TARGET, " - Update: {} {} {}", update.transaction_id(), update.decision(), update.transaction().current_stage()); + } + for (shard_group, pledges) in &change.foreign_pledges { + debug!(target: LOG_TARGET, " - ForeignPledges: {shard_group}"); + for pledge in pledges { + debug!(target: LOG_TARGET, " - {pledge}"); + } + } + } + + for block_id in &self.proposed_foreign_proposals { + debug!(target: LOG_TARGET, "[drop] ProposedForeignProposal: {block_id}"); + } + + for mint in &self.proposed_utxo_mints { + debug!(target: LOG_TARGET, "[drop] ProposedUtxoMint: {mint}"); + } + + for node in &self.evict_nodes { + debug!(target: LOG_TARGET, "[drop] EvictNode: {node}"); + } } } @@ -413,8 +454,8 @@ impl Display for ProposedBlockChangeSet { Some(decision) => write!(f, " Decision: {},", decision)?, None => write!(f, " Decision: NO VOTE, ")?, } - if !self.block_diff.is_empty() { - write!(f, " BlockDiff: {} change(s), ", self.block_diff.len())?; + if !self.substate_changes.is_empty() { + write!(f, " BlockDiff: {} change(s), ", self.substate_changes.len())?; } if !self.state_tree_diffs.is_empty() { write!(f, " StateTreeDiff: {} change(s), ", self.state_tree_diffs.len())?; diff --git a/dan_layer/consensus/src/hotstuff/common.rs b/dan_layer/consensus/src/hotstuff/common.rs index 855c40fe9..a51108d1e 100644 --- a/dan_layer/consensus/src/hotstuff/common.rs +++ b/dan_layer/consensus/src/hotstuff/common.rs @@ -322,8 +322,8 @@ pub(crate) fn get_next_block_height_and_leader< let mut next_height = height; let 
(mut leader_addr, mut leader_pk) = leader_strategy.get_leader_for_next_height(committee, next_height); - while ValidatorConsensusStats::is_node_suspended(tx, block_id, leader_pk)? { - debug!(target: LOG_TARGET, "Validator {} suspended for next height {}. Checking next validator", leader_addr, next_height + NodeHeight(1)); + while ValidatorConsensusStats::is_node_evicted(tx, block_id, leader_pk)? { + debug!(target: LOG_TARGET, "Validator {} evicted for next height {}. Checking next validator", leader_addr, next_height + NodeHeight(1)); next_height += NodeHeight(1); num_skipped += 1; let (addr, pk) = leader_strategy.get_leader_for_next_height(committee, next_height); diff --git a/dan_layer/consensus/src/hotstuff/current_view.rs b/dan_layer/consensus/src/hotstuff/current_view.rs index 666ccba55..5e9752b9e 100644 --- a/dan_layer/consensus/src/hotstuff/current_view.rs +++ b/dan_layer/consensus/src/hotstuff/current_view.rs @@ -60,7 +60,7 @@ impl CurrentView { pub(crate) fn reset(&self, epoch: Epoch, height: NodeHeight) { self.epoch.store(epoch.as_u64(), atomic::Ordering::SeqCst); self.height.store(height.as_u64(), atomic::Ordering::SeqCst); - info!(target: LOG_TARGET, "🧿 PACEMAKER RESET: View updated to {epoch}/{height}"); + info!(target: LOG_TARGET, "🧿 PACEMAKER: reset View updated to {epoch}/{height}"); } } diff --git a/dan_layer/consensus/src/hotstuff/error.rs b/dan_layer/consensus/src/hotstuff/error.rs index e940bec83..5d66aac47 100644 --- a/dan_layer/consensus/src/hotstuff/error.rs +++ b/dan_layer/consensus/src/hotstuff/error.rs @@ -118,12 +118,6 @@ impl From for HotStuffError { pub enum ProposalValidationError { #[error("Storage error: {0}")] StorageError(#[from] StorageError), - #[error("Node proposed by {proposed_by} with ID {block_id} does not match calculated hash {calculated_hash}")] - BlockIdMismatch { - proposed_by: String, - block_id: BlockId, - calculated_hash: BlockId, - }, #[error("Node proposed by {proposed_by} with hash {hash} did not satisfy the safeNode predicate")] NotSafeBlock { proposed_by: String, hash: BlockId }, #[error("Node proposed by {proposed_by} with hash {hash} is missing foreign index")] @@ -261,4 +255,12 @@ pub enum ProposalValidationError { DummyBlockWithSignature { block_id: BlockId }, #[error("Dummy block {block_id} includes commands")] DummyBlockWithCommands { block_id: BlockId }, + #[error("Malformed block {block_id}: {details}")] + MalformedBlock { block_id: BlockId, details: String }, + #[error("Block {block_id} is for a future epoch. 
Current epoch: {current_epoch}, block epoch: {block_epoch}")] + FutureEpoch { + block_id: BlockId, + current_epoch: Epoch, + block_epoch: Epoch, + }, } diff --git a/dan_layer/consensus/src/hotstuff/event.rs b/dan_layer/consensus/src/hotstuff/event.rs index 49a3a9d4a..63463cdfd 100644 --- a/dan_layer/consensus/src/hotstuff/event.rs +++ b/dan_layer/consensus/src/hotstuff/event.rs @@ -1,7 +1,7 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use tari_dan_common_types::{Epoch, NodeHeight}; +use tari_dan_common_types::{Epoch, NodeHeight, ShardGroup}; use tari_dan_storage::consensus_models::{BlockId, LeafBlock}; #[derive(Debug, Clone, thiserror::Error)] @@ -24,4 +24,9 @@ pub enum HotstuffEvent { }, #[error("Parked block {block} is ready")] ParkedBlockReady { block: LeafBlock }, + #[error("Epoch changed to {epoch}")] + EpochChanged { + epoch: Epoch, + registered_shard_group: Option, + }, } diff --git a/dan_layer/consensus/src/hotstuff/eviction_proof.rs b/dan_layer/consensus/src/hotstuff/eviction_proof.rs new file mode 100644 index 000000000..aab21af39 --- /dev/null +++ b/dan_layer/consensus/src/hotstuff/eviction_proof.rs @@ -0,0 +1,222 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use log::*; +use tari_dan_storage::{ + consensus_models::{Block, BlockHeader, QuorumCertificate, QuorumDecision}, + StateStoreReadTransaction, +}; +use tari_sidechain::{ + ChainLink, + CommandCommitProof, + CommitProofElement, + EvictNodeAtom, + EvictionProof, + SidechainBlockCommitProof, + SidechainBlockHeader, + ValidatorQcSignature, +}; + +use crate::hotstuff::HotStuffError; + +const LOG_TARGET: &str = "tari::dan::consensus::hotstuff::eviction_proof"; + +pub fn generate_eviction_proofs( + tx: &TTx, + tip_qc: &QuorumCertificate, + committed_blocks_with_evictions: &[Block], +) -> Result, HotStuffError> { + let num_evictions = committed_blocks_with_evictions + .iter() + .map(|b| b.all_evict_nodes().count()) + .sum(); + + let mut proofs = Vec::with_capacity(num_evictions); + for block in committed_blocks_with_evictions { + // First generate a commit proof for the block which is shared by all EvictionProofs + let block_commit_proof = generate_block_commit_proof(tx, tip_qc, block)?; + + for atom in block.all_evict_nodes() { + info!(target: LOG_TARGET, "🦶 Generating eviction proof for validator: {atom}"); + // TODO: command inclusion proof + let atom = EvictNodeAtom::new(atom.public_key.clone()); + let commit_command_proof = CommandCommitProof::new(atom, block_commit_proof.clone()); + let proof = EvictionProof::new(commit_command_proof); + proofs.push(proof); + } + } + + Ok(proofs) +} + +fn generate_block_commit_proof( + tx: &TTx, + tip_qc: &QuorumCertificate, + commit_block: &Block, +) -> Result { + let mut proof_elements = Vec::with_capacity(3); + + if commit_block.is_dummy() || commit_block.signature().is_none() { + return Err(HotStuffError::InvariantError(format!( + "Commit block is a dummy block or has no signature in generate_block_commit_proof ({commit_block})", + ))); + } + + debug!(target: LOG_TARGET, "Add tip_qc: {tip_qc}"); + proof_elements.push(convert_qc_to_proof_element(tip_qc)); + + let mut block = tip_qc.get_block(tx)?; + while block.id() != commit_block.id() { + if block.justifies_parent() { + debug!(target: LOG_TARGET, "Add justify: {}", block.justify()); + proof_elements.push(convert_qc_to_proof_element(block.justify())); + block = block.get_parent(tx)?; + } else { + block = block.get_parent(tx)?; + let mut dummy_chain = vec![ChainLink { 
+ header_hash: block.header().calculate_hash(), + parent_id: *block.parent().hash(), + }]; + debug!(target: LOG_TARGET, "add dummy chain: {block}"); + let parent_id = *block.parent(); + let qc = block.into_justify(); + block = Block::get(tx, &parent_id)?; + while block.id() != qc.block_id() { + debug!(target: LOG_TARGET, "add dummy chain: {block} QC: {qc}"); + dummy_chain.push(ChainLink { + header_hash: block.header().calculate_hash(), + parent_id: *block.parent().hash(), + }); + + block = block.get_parent(tx)?; + if block.height() < qc.block_height() { + return Err(HotStuffError::InvariantError(format!( + "Block height is less than the height of the QC in generate_block_commit_proof \ + (block={block}, qc={qc})", + ))); + } + } + + proof_elements.push(CommitProofElement::DummyChain(dummy_chain)); + debug!(target: LOG_TARGET, "Add justify: {}", qc); + proof_elements.push(convert_qc_to_proof_element(&qc)); + } + // Prevent possibility of endless loop + if block.height() < commit_block.height() { + return Err(HotStuffError::InvariantError(format!( + "Block height is less than the commit block height in generate_block_commit_proof ({block}, \ + commit_block={commit_block})", + ))); + } + } + + let command_commit_proof = SidechainBlockCommitProof { + header: convert_block_to_sidechain_block_header(commit_block.header()), + proof_elements, + }; + + Ok(command_commit_proof) +} + +pub fn convert_block_to_sidechain_block_header(header: &BlockHeader) -> SidechainBlockHeader { + SidechainBlockHeader { + network: header.network().as_byte(), + parent_id: *header.parent().hash(), + justify_id: *header.justify_id().hash(), + height: header.height().as_u64(), + epoch: header.epoch().as_u64(), + shard_group: tari_sidechain::ShardGroup { + start: header.shard_group().start().as_u32(), + end_inclusive: header.shard_group().end().as_u32(), + }, + proposed_by: header.proposed_by().clone(), + total_leader_fee: header.total_leader_fee(), + state_merkle_root: *header.state_merkle_root(), + command_merkle_root: *header.command_merkle_root(), + is_dummy: header.is_dummy(), + foreign_indexes_hash: header.create_foreign_indexes_hash(), + signature: header.signature().expect("checked by caller").clone(), + timestamp: header.timestamp(), + base_layer_block_height: header.base_layer_block_height(), + base_layer_block_hash: *header.base_layer_block_hash(), + extra_data_hash: header.create_extra_data_hash(), + } +} + +fn convert_qc_to_proof_element(qc: &QuorumCertificate) -> CommitProofElement { + CommitProofElement::QuorumCertificate(tari_sidechain::QuorumCertificate { + header_hash: *qc.header_hash(), + parent_id: *qc.parent_id().hash(), + signatures: qc + .signatures() + .iter() + .map(|s| ValidatorQcSignature { + public_key: s.public_key.clone(), + signature: s.signature.clone(), + }) + .collect(), + decision: match qc.decision() { + QuorumDecision::Accept => tari_sidechain::QuorumDecision::Accept, + QuorumDecision::Reject => tari_sidechain::QuorumDecision::Reject, + }, + }) +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::test_helpers::load_fixture; + + #[test] + fn it_produces_a_summarized_header_that_hashes_to_the_original() { + let block = load_fixture::("block.json"); + let sidechain_block = convert_block_to_sidechain_block_header(block.header()); + assert_eq!(sidechain_block.extra_data_hash, block.header().create_extra_data_hash()); + assert_eq!( + sidechain_block.base_layer_block_hash, + *block.header().base_layer_block_hash() + ); + assert_eq!( + sidechain_block.base_layer_block_height, + 
block.header().base_layer_block_height() + ); + assert_eq!(sidechain_block.timestamp, block.header().timestamp()); + assert_eq!( + sidechain_block.signature, + block.header().signature().expect("checked by caller").clone() + ); + assert_eq!( + sidechain_block.foreign_indexes_hash, + block.header().create_foreign_indexes_hash() + ); + assert_eq!(sidechain_block.is_dummy, block.header().is_dummy()); + assert_eq!( + sidechain_block.command_merkle_root, + *block.header().command_merkle_root() + ); + assert_eq!(sidechain_block.state_merkle_root, *block.header().state_merkle_root()); + assert_eq!(sidechain_block.total_leader_fee, block.header().total_leader_fee()); + assert_eq!(sidechain_block.proposed_by, block.header().proposed_by().clone()); + assert_eq!( + sidechain_block.shard_group.start, + block.header().shard_group().start().as_u32() + ); + assert_eq!( + sidechain_block.shard_group.end_inclusive, + block.header().shard_group().end().as_u32() + ); + assert_eq!(sidechain_block.epoch, block.header().epoch().as_u64()); + assert_eq!(sidechain_block.height, block.header().height().as_u64()); + assert_eq!(sidechain_block.justify_id, *block.header().justify_id().hash()); + assert_eq!(sidechain_block.parent_id, *block.header().parent().hash()); + assert_eq!(sidechain_block.network, block.header().network().as_byte()); + + // Finally check the hash matches + assert_eq!(sidechain_block.calculate_hash(), block.header().calculate_hash()); + assert_eq!( + sidechain_block.calculate_block_id(), + *block.header().calculate_id().hash() + ); + } +} diff --git a/dan_layer/consensus/src/hotstuff/foreign_proposal_processor.rs b/dan_layer/consensus/src/hotstuff/foreign_proposal_processor.rs index 9323eb8a2..737bc10a1 100644 --- a/dan_layer/consensus/src/hotstuff/foreign_proposal_processor.rs +++ b/dan_layer/consensus/src/hotstuff/foreign_proposal_processor.rs @@ -387,8 +387,7 @@ pub fn process_foreign_block( Command::Prepare(_) | Command::LocalOnly(_) | Command::ForeignProposal(_) | - Command::SuspendNode(_) | - Command::ResumeNode(_) | + Command::EvictNode(_) | Command::MintConfidentialOutput(_) => { // Disregard continue; diff --git a/dan_layer/consensus/src/hotstuff/mod.rs b/dan_layer/consensus/src/hotstuff/mod.rs index 2faec1870..04e00c7d3 100644 --- a/dan_layer/consensus/src/hotstuff/mod.rs +++ b/dan_layer/consensus/src/hotstuff/mod.rs @@ -21,6 +21,7 @@ mod on_receive_request_missing_transactions; mod on_receive_vote; // mod on_sync_response; mod block_change_set; +pub mod eviction_proof; mod foreign_proposal_processor; mod on_catch_up_sync; mod on_message_validate; diff --git a/dan_layer/consensus/src/hotstuff/on_inbound_message.rs b/dan_layer/consensus/src/hotstuff/on_inbound_message.rs index c59ba1b73..0a1955627 100644 --- a/dan_layer/consensus/src/hotstuff/on_inbound_message.rs +++ b/dan_layer/consensus/src/hotstuff/on_inbound_message.rs @@ -94,14 +94,23 @@ impl MessageBuffer { while let Some(result) = self.inbound_messaging.next_message().await { let (from, msg) = result?; + + // If we receive an FP that is greater than our current epoch, we buffer it + if let HotstuffMessage::ForeignProposal(ref m) = msg { + if m.justify_qc.epoch() > current_epoch { + self.push_to_buffer(m.justify_qc.epoch(), NodeHeight::zero(), from, msg); + continue; + } + } + match msg_epoch_and_height(&msg) { // Discard old message - Some((e, h)) if e < current_epoch || h < next_height => { - info!(target: LOG_TARGET, "Discard message {} is for previous view {}/{}. 
Current view {}/{}", msg, e, h, current_epoch, next_height); + Some((e, h)) if e < current_epoch || (e == current_epoch && h < next_height) => { + info!(target: LOG_TARGET, "🗑️ Discard message {} is for previous view {}/{}. Current view {}/{}", msg, e, h, current_epoch, next_height); continue; }, // Buffer message for future epoch/height - Some((epoch, height)) if epoch > current_epoch || height > next_height => { + Some((epoch, height)) if epoch == current_epoch && height > next_height => { if msg.proposal().is_some() { info!(target: LOG_TARGET, "🦴Proposal {msg} is for future view (Current view: {current_epoch}, {next_height})"); } else { @@ -110,6 +119,17 @@ impl MessageBuffer { self.push_to_buffer(epoch, height, from, msg); continue; }, + Some((epoch, height)) if epoch > current_epoch => { + warn!(target: LOG_TARGET, "⚠️ Message {msg} is for future epoch {epoch}. Current epoch {current_epoch}"); + if matches!(&msg, HotstuffMessage::Vote(_)) { + // Buffer VOTE messages. As it does not contain a QC we can use to prove that a BFT-majority has + // reached the epoch + self.push_to_buffer(epoch, height, from, msg); + continue; + } + // Return the message, it will be validated and if valid, will kick consensus into sync + return Ok(Some((from, msg))); + }, // Height is irrelevant or current, return message _ => return Ok(Some((from, msg))), } diff --git a/dan_layer/consensus/src/hotstuff/on_message_validate.rs b/dan_layer/consensus/src/hotstuff/on_message_validate.rs index bce67d70e..987459fd2 100644 --- a/dan_layer/consensus/src/hotstuff/on_message_validate.rs +++ b/dan_layer/consensus/src/hotstuff/on_message_validate.rs @@ -21,7 +21,7 @@ use tokio::sync::broadcast; use super::config::HotstuffConfig; use crate::{ block_validations, - hotstuff::{error::HotStuffError, HotstuffEvent, ProposalValidationError}, + hotstuff::{error::HotStuffError, CurrentView, HotstuffEvent, ProposalValidationError}, messages::{ForeignProposalMessage, HotstuffMessage, MissingTransactionsRequest, ProposalMessage}, tracing::TraceTimer, traits::{ConsensusSpec, OutboundMessaging}, @@ -33,6 +33,7 @@ pub struct OnMessageValidate { config: HotstuffConfig, store: TConsensusSpec::StateStore, epoch_manager: TConsensusSpec::EpochManager, + current_view: CurrentView, leader_strategy: TConsensusSpec::LeaderStrategy, vote_signing_service: TConsensusSpec::SignatureService, outbound_messaging: TConsensusSpec::OutboundMessaging, @@ -47,6 +48,7 @@ impl OnMessageValidate { config: HotstuffConfig, store: TConsensusSpec::StateStore, epoch_manager: TConsensusSpec::EpochManager, + current_view: CurrentView, leader_strategy: TConsensusSpec::LeaderStrategy, vote_signing_service: TConsensusSpec::SignatureService, outbound_messaging: TConsensusSpec::OutboundMessaging, @@ -56,6 +58,7 @@ impl OnMessageValidate { config, store, epoch_manager, + current_view, leader_strategy, vote_signing_service, outbound_messaging, @@ -146,6 +149,7 @@ impl OnMessageValidate { ); if proposal.block.height() < current_height { + // Should never happen since the on_inbound_message handler filters these out info!( target: LOG_TARGET, "🔥 Block {} is lower than current height {}. 
Ignoring.", @@ -155,7 +159,7 @@ impl OnMessageValidate { return Ok(MessageValidationResult::Discard); } - if let Err(err) = self.check_proposal(&proposal.block, local_committee, local_committee_info) { + if let Err(err) = self.check_local_proposal(&proposal.block, local_committee, local_committee_info) { return Ok(MessageValidationResult::Invalid { from, message: HotstuffMessage::Proposal(proposal), @@ -202,7 +206,24 @@ impl OnMessageValidate { }) } - fn check_proposal( + fn check_local_proposal( + &self, + block: &Block, + committee_for_block: &Committee, + committee_info: &CommitteeInfo, + ) -> Result<(), HotStuffError> { + block_validations::check_local_proposal::( + self.current_view.get_epoch(), + block, + committee_info, + committee_for_block, + &self.vote_signing_service, + &self.leader_strategy, + &self.config, + ) + } + + fn check_foreign_proposal( &self, block: &Block, committee_for_block: &Committee, @@ -325,7 +346,7 @@ impl OnMessageValidate { .get_committee_info_by_validator_public_key(msg.block.epoch(), msg.block.proposed_by().clone()) .await?; - if let Err(err) = self.check_proposal(&msg.block, &committee, &committee_info) { + if let Err(err) = self.check_foreign_proposal(&msg.block, &committee, &committee_info) { return Ok(MessageValidationResult::Invalid { from, message: HotstuffMessage::ForeignProposal(msg), diff --git a/dan_layer/consensus/src/hotstuff/on_next_sync_view.rs b/dan_layer/consensus/src/hotstuff/on_next_sync_view.rs index f98e9dfbd..8848be42f 100644 --- a/dan_layer/consensus/src/hotstuff/on_next_sync_view.rs +++ b/dan_layer/consensus/src/hotstuff/on_next_sync_view.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause use log::*; -use tari_dan_common_types::{committee::Committee, optional::Optional, Epoch, NodeHeight}; +use tari_dan_common_types::{committee::Committee, option::DisplayContainer, optional::Optional, Epoch, NodeHeight}; use tari_dan_storage::{ consensus_models::{HighQc, LastSentVote, LeafBlock}, StateStore, @@ -60,6 +60,7 @@ impl OnNextSyncViewHandler { let high_qc = HighQc::get(tx, epoch)?.get_quorum_certificate(tx)?; let last_sent_vote = LastSentVote::get(tx) .optional()? 
+ .filter(|vote| high_qc.epoch() < vote.epoch) .filter(|vote| high_qc.block_height() < vote.block_height); Ok::<_, HotStuffError>((next_height, next_leader, leaf_block, high_qc, last_sent_vote)) })?; @@ -77,7 +78,7 @@ impl OnNextSyncViewHandler { info!( target: LOG_TARGET, "🌟 Send NEWVIEW {new_height} Vote[{}] HighQC: {high_qc} to {next_leader}", - last_vote.as_ref().map(|v| format!("{}", v.unverified_block_height)).unwrap_or_else(|| "None".to_string()), + last_vote.display() ); let message = NewViewMessage { high_qc, diff --git a/dan_layer/consensus/src/hotstuff/on_propose.rs b/dan_layer/consensus/src/hotstuff/on_propose.rs index e39114a58..7abaeea39 100644 --- a/dan_layer/consensus/src/hotstuff/on_propose.rs +++ b/dan_layer/consensus/src/hotstuff/on_propose.rs @@ -3,15 +3,16 @@ use std::{ collections::{BTreeSet, HashMap, HashSet}, + fmt::Display, num::NonZeroU64, }; -use indexmap::IndexMap; use log::*; use tari_common_types::types::{FixedHash, PublicKey}; use tari_crypto::tari_utilities::epoch_time::EpochTime; use tari_dan_common_types::{ committee::{Committee, CommitteeInfo}, + option::DisplayContainer, optional::Optional, shard::Shard, Epoch, @@ -24,11 +25,13 @@ use tari_dan_storage::{ consensus_models::{ AbortReason, Block, + BlockHeader, BlockId, BlockTransactionExecution, BurntUtxo, Command, Decision, + EvictNodeAtom, ForeignProposal, ForeignSendCounters, HighQc, @@ -37,10 +40,8 @@ use tari_dan_storage::{ LockedBlock, PendingShardStateTreeDiff, QuorumCertificate, - ResumeNodeAtom, SubstateChange, SubstateRequirementLockIntent, - SuspendNodeAtom, TransactionAtom, TransactionExecution, TransactionPool, @@ -155,7 +156,6 @@ where TConsensusSpec: ConsensusSpec let base_layer_block_height = current_base_layer_block_height; let on_propose = self.clone(); - let validator_node_pk = self.signing_service.public_key().clone(); let (next_block, foreign_proposals) = task::spawn_blocking(move || { on_propose.store.with_write_tx(|tx| { let high_qc = HighQc::get(&**tx, epoch)?; @@ -174,7 +174,6 @@ where TConsensusSpec: ConsensusSpec next_height, leaf_block, high_qc_cert, - validator_node_pk, &local_committee_info, false, base_layer_block_height, @@ -247,9 +246,17 @@ where TConsensusSpec: ConsensusSpec self.outbound_messaging.send_self(msg.clone()).await?; // If we are the only VN in this committee, no need to multicast if local_committee_info.num_shard_group_members() > 1 { - self.outbound_messaging + if let Err(err) = self + .outbound_messaging .multicast(local_committee_info.shard_group(), msg) - .await?; + .await + { + warn!( + target: LOG_TARGET, + "Failed to multicast proposal to local committee: {}", + err + ); + } } Ok(()) @@ -381,145 +388,57 @@ where TConsensusSpec: ConsensusSpec next_height: NodeHeight, parent_block: LeafBlock, high_qc_certificate: QuorumCertificate, - proposed_by: PublicKey, local_committee_info: &CommitteeInfo, dont_propose_transactions: bool, base_layer_block_height: u64, base_layer_block_hash: FixedHash, propose_epoch_end: bool, ) -> Result { - let max_block_size = self.config.consensus_constants.max_block_size; - - let justifies_parent = high_qc_certificate.block_id() == parent_block.block_id(); - let start_of_chain_block = if justifies_parent || high_qc_certificate.is_zero() { - // Parent is justified - we can include its state in the MR calc, foreign propose etc + // The parent block will only ever not exist if it is a dummy block + let parent_exists = Block::record_exists(tx, parent_block.block_id())?; + let start_of_chain_block = if parent_exists { + // Parent 
exists - we can include its state in the MR calc, foreign propose etc parent_block } else { - // Parent is not justified which means we have dummy blocks between the parent and the justified block so we - // can exclude them from the query. Also note that the query will fail if we used the parent - // block id, since the dummy blocks do not exist yet. + // Parent does not exist which means we have dummy blocks between the parent and the justified block so we + // can exclude them from the query. There are a few queries that will fail if we used a non-existent block. high_qc_certificate.as_leaf_block() }; let mut total_leader_fee = 0; - let foreign_proposals = if propose_epoch_end { - vec![] - } else { - ForeignProposal::get_all_new(tx, start_of_chain_block.block_id(), max_block_size / 4)? - }; - - if !foreign_proposals.is_empty() { - debug!( - target: LOG_TARGET, - "🌿 Found {} foreign proposals for next block", - foreign_proposals.len() - ); - } - - let burnt_utxos = if dont_propose_transactions || propose_epoch_end { - vec![] - } else { - max_block_size - .checked_sub(foreign_proposals.len() * 4) - .filter(|n| *n > 0) - .map(|size| BurntUtxo::get_all_unproposed(tx, start_of_chain_block.block_id(), size)) - .transpose()? - .unwrap_or_default() - }; - - if !burnt_utxos.is_empty() { - debug!( - target: LOG_TARGET, - "🌿 Found {} burnt utxos for next block", - burnt_utxos.len() - ); - } - - let suspend_nodes = if dont_propose_transactions || propose_epoch_end { - vec![] - } else { - let num_suspended = ValidatorConsensusStats::count_number_suspended_nodes(tx)?; - let max_allowed_to_suspend = - u64::from(local_committee_info.quorum_threshold()).saturating_sub(num_suspended); - - max_block_size - .checked_sub(foreign_proposals.len() * 4 + burnt_utxos.len()) - .filter(|n| *n > 0) - .map(|size| { - ValidatorConsensusStats::get_nodes_to_suspend( - tx, - start_of_chain_block.block_id(), - self.config.consensus_constants.missed_proposal_suspend_threshold, - size.min(max_allowed_to_suspend as usize), - ) - }) - .transpose()? - .unwrap_or_default() - }; - - if !suspend_nodes.is_empty() { - debug!( - target: LOG_TARGET, - "🌿 Found {} suspend nodes for next block", - suspend_nodes.len() - ) - } - - let resume_nodes = if dont_propose_transactions || propose_epoch_end { - vec![] + let batch = if propose_epoch_end { + ProposalBatch::default() } else { - max_block_size - .checked_sub(foreign_proposals.len() * 4 + burnt_utxos.len()) - .filter(|n| *n > 0) - .map(|size| ValidatorConsensusStats::get_nodes_to_resume(tx, start_of_chain_block.block_id(), size)) - .transpose()? - .unwrap_or_default() + self.fetch_next_proposal_batch( + tx, + local_committee_info, + dont_propose_transactions, + start_of_chain_block, + )? }; - if !resume_nodes.is_empty() { - debug!( - target: LOG_TARGET, - "🌿 Found {} resume nodes for next block", - resume_nodes.len() - ) - } - - let suspend_nodes_len = suspend_nodes.len(); - - let batch = if dont_propose_transactions || propose_epoch_end { - vec![] - } else { - max_block_size - // Each foreign proposal is "heavier" than a transaction command - .checked_sub(foreign_proposals.len() * 4 + burnt_utxos.len() + suspend_nodes.len()) - .filter(|n| *n > 0) - .map(|size| self.transaction_pool.get_batch_for_next_block(tx, size, start_of_chain_block.block_id())) - .transpose()? 
- .unwrap_or_default() - }; + debug!(target: LOG_TARGET, "🌿 PROPOSE: {batch}"); let mut commands = if propose_epoch_end { BTreeSet::from_iter([Command::EndEpoch]) } else { BTreeSet::from_iter( - foreign_proposals + batch + .foreign_proposals .iter() .map(|fp| Command::ForeignProposal(fp.to_atom())) .chain( - burnt_utxos + batch + .burnt_utxos .iter() .map(|bu| Command::MintConfidentialOutput(bu.to_atom())), ) .chain( - suspend_nodes - .into_iter() - .map(|public_key| Command::SuspendNode(SuspendNodeAtom { public_key })), - ) - .chain( - resume_nodes + batch + .evict_nodes .into_iter() - .map(|public_key| Command::ResumeNode(ResumeNodeAtom { public_key })), + .map(|public_key| Command::EvictNode(EvictNodeAtom { public_key })), ), ) }; @@ -527,7 +446,7 @@ where TConsensusSpec: ConsensusSpec let mut change_set = ProposedBlockChangeSet::new(high_qc_certificate.as_leaf_block()); // No need to include evidence from justified block if no transactions are included in the next block - if !batch.is_empty() { + if !batch.transactions.is_empty() { // TODO(protocol-efficiency): We should process any foreign proposals included in this block to include // evidence. And that should determine if they are ready. However this is difficult because we // get the batch from the database which isnt aware of which foreign proposals we're going to @@ -559,22 +478,16 @@ where TConsensusSpec: ConsensusSpec } } - debug!( - target: LOG_TARGET, - "🌿 PROPOSE: {} (or less) transaction(s), {} foreign proposal(s), {} UTXOs, {} suspends for next block (justifies_parent = {})", - batch.len(), foreign_proposals.len() , burnt_utxos.len(), justifies_parent, suspend_nodes_len - ); - // batch is empty for is_empty, is_epoch_end and is_epoch_start blocks let mut substate_store = PendingSubstateStore::new( tx, - *parent_block.block_id(), + *start_of_chain_block.block_id(), self.config.consensus_constants.num_preshards, ); let mut executed_transactions = HashMap::new(); - let timer = TraceTimer::info(LOG_TARGET, "Generating commands").with_iterations(batch.len()); + let timer = TraceTimer::info(LOG_TARGET, "Generating commands").with_iterations(batch.transactions.len()); let mut lock_conflicts = TransactionLockConflicts::new(); - for mut transaction in batch { + for mut transaction in batch.transactions { // Apply the transaction updates (if any) that occurred as a result of the justified block. // This allows us to propose evidence in the next block that relates to transactions in the justified block. 
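// NOTE (illustrative aside, not part of this patch): the block-size budgeting that
// replaces the removed inline logic above shrinks an Option<usize> budget as each command
// category is added, with foreign proposals weighted at 4 units each; once the budget is
// spent the Option becomes None and later categories (including the transaction batch)
// are skipped. A standalone sketch with made-up counts, mirroring the
// `subtract_block_size_checked` helper defined further down in this file; `budget_after`
// and `budget_example` are illustrative names only:

/// Mirrors `subtract_block_size_checked`: returns the remaining budget, or None once spent.
fn budget_after(remaining: Option<usize>, by: usize) -> Option<usize> {
    remaining.and_then(|sz| sz.checked_sub(by)).filter(|sz| *sz > 0)
}

fn budget_example() {
    // Illustrative values; the real limit comes from consensus_constants.max_block_size (500).
    let max_block_size = 500usize;
    let num_foreign_proposals = 3; // each foreign proposal is "heavier", costing 4 units
    let num_burnt_utxos = 10;
    let num_evict_nodes = 1;

    let mut remaining = budget_after(Some(max_block_size), num_foreign_proposals * 4);
    remaining = budget_after(remaining, num_burnt_utxos);
    remaining = budget_after(remaining, num_evict_nodes);

    // Whatever remains caps the transaction batch; None means no transactions are proposed.
    assert_eq!(remaining, Some(477));
}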
change_set.apply_transaction_update(&mut transaction); @@ -598,15 +511,15 @@ where TConsensusSpec: ConsensusSpec timer.done(); // This relies on the UTXO commands being ordered after transaction commands - for utxo in burnt_utxos { - let id = VersionedSubstateId::new(utxo.substate_id.clone(), 0); + for utxo in batch.burnt_utxos { + let id = VersionedSubstateId::new(utxo.commitment, 0); let shard = id.to_substate_address().to_shard(local_committee_info.num_preshards()); let change = SubstateChange::Up { id, shard, // N/A transaction_id: Default::default(), - substate: Substate::new(0, utxo.substate_value), + substate: Substate::new(0, utxo.output), }; substate_store.put(change)?; @@ -615,7 +528,7 @@ where TConsensusSpec: ConsensusSpec debug!( target: LOG_TARGET, "command(s) for next block: [{}]", - commands.iter().map(|c| c.to_string()).collect::>().join(",") + commands.display() ); let timer = TraceTimer::info(LOG_TARGET, "Propose calculate state root"); @@ -634,24 +547,21 @@ where TConsensusSpec: ConsensusSpec let non_local_shards = get_non_local_shards(substate_store.diff(), local_committee_info); let foreign_counters = ForeignSendCounters::get_or_default(tx, parent_block.block_id())?; - let mut foreign_indexes = non_local_shards + let foreign_indexes = non_local_shards .iter() .map(|shard| (*shard, foreign_counters.get_count(*shard) + 1)) - .collect::>(); + .collect(); - // Ensure that foreign indexes are canonically ordered - foreign_indexes.sort_keys(); - - let mut next_block = Block::create( + let mut header = BlockHeader::create( self.config.network, *parent_block.block_id(), - high_qc_certificate, + *high_qc_certificate.id(), next_height, epoch, local_committee_info.shard_group(), - proposed_by, - commands, + self.signing_service.public_key().clone(), state_root, + &commands, total_leader_fee, foreign_indexes, None, @@ -661,17 +571,114 @@ where TConsensusSpec: ConsensusSpec ExtraData::new(), )?; - let signature = self.signing_service.sign(next_block.id()); - next_block.set_signature(signature); + let signature = self.signing_service.sign(header.id()); + header.set_signature(signature); + + let next_block = Block::new(header, high_qc_certificate, commands); Ok(NextBlock { block: next_block, - foreign_proposals, + foreign_proposals: batch.foreign_proposals, executed_transactions, lock_conflicts, }) } + #[allow(clippy::too_many_lines)] + fn fetch_next_proposal_batch( + &self, + tx: &<::StateStore as StateStore>::ReadTransaction<'_>, + local_committee_info: &CommitteeInfo, + dont_propose_transactions: bool, + start_of_chain_block: LeafBlock, + ) -> Result { + let _timer = TraceTimer::debug(LOG_TARGET, "fetch_next_proposal_batch"); + let foreign_proposals = ForeignProposal::get_all_new( + tx, + start_of_chain_block.block_id(), + self.config.consensus_constants.max_block_size / 4, + )?; + + if !foreign_proposals.is_empty() { + debug!( + target: LOG_TARGET, + "🌿 Found {} foreign proposals for next block", + foreign_proposals.len() + ); + } + + let mut remaining_block_size = subtract_block_size_checked( + Some(self.config.consensus_constants.max_block_size), + foreign_proposals.len() * 4, + ); + + let burnt_utxos = remaining_block_size + .map(|size| BurntUtxo::get_all_unproposed(tx, start_of_chain_block.block_id(), size)) + .transpose()? 
+ .unwrap_or_default(); + + if !burnt_utxos.is_empty() { + debug!( + target: LOG_TARGET, + "🌿 Found {} burnt utxos for next block", + burnt_utxos.len() + ); + } + + remaining_block_size = subtract_block_size_checked(remaining_block_size, burnt_utxos.len()); + + let evict_nodes = remaining_block_size + .map(|max| { + let num_evicted = + ValidatorConsensusStats::count_number_evicted_nodes(tx, start_of_chain_block.epoch())?; + let remaining_max = u64::from(local_committee_info.max_failures()).saturating_sub(num_evicted); + if remaining_max == 0 { + debug!( + target: LOG_TARGET, + "🦶 No more nodes can be evicted for next block. Num evicted: {num_evicted}", + ); + } + let max_allowed_to_evict = remaining_max.min(max as u64); + ValidatorConsensusStats::get_nodes_to_evict( + tx, + start_of_chain_block.block_id(), + self.config.consensus_constants.missed_proposal_evict_threshold, + max_allowed_to_evict, + ) + }) + .transpose()? + .unwrap_or_default(); + + if !evict_nodes.is_empty() { + debug!( + target: LOG_TARGET, + "🌿 Found {} EVICT nodes for next block", + evict_nodes.len() + ) + } + + remaining_block_size = subtract_block_size_checked(remaining_block_size, evict_nodes.len()); + + let transactions = if dont_propose_transactions { + vec![] + } else { + remaining_block_size + .map(|size| { + self.transaction_pool + .get_batch_for_next_block(tx, size, start_of_chain_block.block_id()) + }) + .transpose()? + .unwrap_or_default() + }; + + Ok(ProposalBatch { + foreign_proposals, + burnt_utxos, + transactions, + evict_nodes, + }) + } + #[allow(clippy::too_many_lines)] fn prepare_transaction( &self, @@ -992,3 +999,30 @@ pub fn get_non_local_shards(diff: &[SubstateChange], local_committee_info: &Comm .filter(|shard| local_committee_info.shard_group().contains(shard)) .collect() } + +#[derive(Default)] +struct ProposalBatch { + pub foreign_proposals: Vec, + pub burnt_utxos: Vec, + pub transactions: Vec, + pub evict_nodes: Vec, +} + +impl Display for ProposalBatch { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{} transaction(s), {} foreign proposal(s), {} UTXOs, {} evict", + self.transactions.len(), + self.foreign_proposals.len(), + self.burnt_utxos.len(), + self.evict_nodes.len() + ) + } +} + +fn subtract_block_size_checked(remaining_block_size: Option, by: usize) -> Option { + remaining_block_size + .and_then(|sz| sz.checked_sub(by)) + .filter(|sz| *sz > 0) +} diff --git a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs index c43261255..dc57ca8ab 100644 --- a/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs +++ b/dan_layer/consensus/src/hotstuff/on_ready_to_vote_on_local_block.rs @@ -143,6 +143,7 @@ where TConsensusSpec: ConsensusSpec let mut finalized_transactions = Vec::new(); let mut end_of_epoch = None; let mut maybe_high_qc = None; + let mut committed_blocks_with_evictions = Vec::new(); if change_set.is_accept() { // Update nodes @@ -155,10 +156,13 @@ where TConsensusSpec: ConsensusSpec self.on_lock_block(tx, block) }, |tx, last_exec, commit_block| { - let committed = self.on_commit(tx, last_exec, commit_block, local_committee_info)?; + let committed = self.on_commit(tx, last_exec, &commit_block, local_committee_info)?; if commit_block.is_epoch_end() { end_of_epoch = Some(commit_block.epoch()); } + if commit_block.all_evict_nodes().next().is_some() { + committed_blocks_with_evictions.push(commit_block); + } if !committed.is_empty() { 
finalized_transactions.push(committed); } @@ -191,6 +195,7 @@ where TConsensusSpec: ConsensusSpec finalized_transactions, end_of_epoch, high_qc, + committed_blocks_with_evictions, }) } @@ -305,7 +310,6 @@ where TConsensusSpec: ConsensusSpec PendingSubstateStore::new(tx, *block.parent(), self.config.consensus_constants.num_preshards); let mut total_leader_fee = 0; let locked_block = LockedBlock::get(tx, block.epoch())?; - let mut suspended_in_this_block_count = 0u64; for cmd in block.commands() { match cmd { @@ -439,84 +443,49 @@ where TConsensusSpec: ConsensusSpec return Ok(()); } }, - Command::SuspendNode(atom) => { - if ValidatorConsensusStats::is_node_suspended(tx, block.id(), &atom.public_key)? { + Command::EvictNode(atom) => { + if ValidatorConsensusStats::is_node_evicted(tx, block.id(), &atom.public_key)? { warn!( target: LOG_TARGET, - "❌ NO VOTE: {}", NoVoteReason::NodeAlreadySuspended + "❌ NO VOTE: {}", NoVoteReason::NodeAlreadyEvicted ); - proposed_block_change_set.no_vote(NoVoteReason::ShouldNotSuspendNode); + proposed_block_change_set.no_vote(NoVoteReason::NodeAlreadyEvicted); return Ok(()); } - let num_suspended = ValidatorConsensusStats::count_number_suspended_nodes(tx)?; - let max_allowed_to_suspend = u64::from(local_committee_info.quorum_threshold()) - .saturating_sub(num_suspended) - .saturating_sub(suspended_in_this_block_count); - if max_allowed_to_suspend == 0 { + let num_evicted = ValidatorConsensusStats::count_number_evicted_nodes(tx, block.epoch())?; + let max_allowed_to_evict = u64::from(local_committee_info.quorum_threshold()) + .saturating_sub(num_evicted) + .saturating_sub(proposed_block_change_set.num_evicted_nodes_this_block() as u64); + if max_allowed_to_evict == 0 { warn!( target: LOG_TARGET, - "❌ NO VOTE: {}", NoVoteReason::CannotSuspendNodeBelowQuorumThreshold + "❌ NO VOTE: {}", NoVoteReason::CannotEvictNodeBelowQuorumThreshold ); - proposed_block_change_set.no_vote(NoVoteReason::ShouldNotSuspendNode); + proposed_block_change_set.no_vote(NoVoteReason::CannotEvictNodeBelowQuorumThreshold); return Ok(()); } - suspended_in_this_block_count += 1; let stats = ValidatorConsensusStats::get_by_public_key(tx, block.epoch(), &atom.public_key)?; - if stats.missed_proposals < self.config.consensus_constants.missed_proposal_suspend_threshold { + if stats.missed_proposals < self.config.consensus_constants.missed_proposal_evict_threshold { warn!( target: LOG_TARGET, - "❌ NO VOTE: {} (actual missed count: {}, threshold: {})", NoVoteReason::ShouldNotSuspendNode, stats.missed_proposals, self.config.consensus_constants.missed_proposal_suspend_threshold + "❌ NO VOTE: {} (actual missed count: {}, threshold: {})", NoVoteReason::ShouldNotEvictNode, stats.missed_proposals, self.config.consensus_constants.missed_proposal_evict_threshold ); - proposed_block_change_set.no_vote(NoVoteReason::ShouldNotSuspendNode); + proposed_block_change_set.no_vote(NoVoteReason::ShouldNotEvictNode); return Ok(()); } info!( target: LOG_TARGET, - "🐢 Suspending node: {} with missed count {}", + "💀 EVICTING node: {} with missed count {}", atom.public_key, stats.missed_proposals ); - proposed_block_change_set.add_suspend_node(atom.public_key.clone()); - }, - Command::ResumeNode(atom) => { - if !ValidatorConsensusStats::is_node_suspended(tx, block.id(), &atom.public_key)? 
{ - warn!( - target: LOG_TARGET, - "❌ NO VOTE: {}", NoVoteReason::NodeNotSuspended - ); - - proposed_block_change_set.no_vote(NoVoteReason::NodeNotSuspended); - return Ok(()); - } - - let stats = ValidatorConsensusStats::get_by_public_key(tx, block.epoch(), &atom.public_key)?; - if stats.missed_proposals > 0 { - warn!( - target: LOG_TARGET, - "❌ NO VOTE: {}", NoVoteReason::ShouldNodeResumeNode - ); - warn!( - target: LOG_TARGET, - "❌ NO VOTE: {} (actual missed count: {})", NoVoteReason::ShouldNodeResumeNode, stats.missed_proposals, - ); - - proposed_block_change_set.no_vote(NoVoteReason::ShouldNodeResumeNode); - return Ok(()); - } - suspended_in_this_block_count = suspended_in_this_block_count.saturating_sub(1); - - info!( - target: LOG_TARGET, - "🐇 Resume node: {}", - atom.public_key, - ); - proposed_block_change_set.add_resume_node(atom.public_key.clone()); + proposed_block_change_set.add_evict_node(atom.public_key.clone()); }, Command::EndEpoch => { if !can_propose_epoch_end { @@ -569,18 +538,23 @@ where TConsensusSpec: ConsensusSpec if expected_merkle_root != *block.state_merkle_root() { warn!( target: LOG_TARGET, - "❌ Merkle root disagreement for block {}. Leader proposed {}, we calculated {}", + "❌ State Merkle root disagreement for block {}. Leader proposed {}, we calculated {}", block, block.state_merkle_root(), expected_merkle_root ); - proposed_block_change_set.no_vote(NoVoteReason::StateMerkleRootMismatch); + let (diff, locks) = substate_store.into_parts(); + proposed_block_change_set + .no_vote(NoVoteReason::StateMerkleRootMismatch) + // These are set for debugging purposes but aren't actually committed + .set_substate_changes(diff) + .set_substate_locks(locks); return Ok(()); } let (diff, locks) = substate_store.into_parts(); proposed_block_change_set - .set_block_diff(diff) + .set_substate_changes(diff) .set_state_tree_diffs(tree_diffs) .set_substate_locks(locks) .set_quorum_decision(QuorumDecision::Accept); @@ -1634,18 +1608,18 @@ where TConsensusSpec: ConsensusSpec warn!( target: LOG_TARGET, "❌ NO VOTE: MintConfidentialOutputAtom for {} is not known.", - atom.substate_id + atom.commitment ); return Ok(Some(NoVoteReason::MintConfidentialOutputUnknown)); }; - let id = VersionedSubstateId::new(utxo.substate_id.clone(), 0); + let id = VersionedSubstateId::new(utxo.commitment, 0); let shard = id.to_substate_address().to_shard(local_committee_info.num_preshards()); let change = SubstateChange::Up { id, shard, // N/A transaction_id: Default::default(), - substate: Substate::new(0, utxo.substate_value), + substate: Substate::new(0, utxo.output), }; if let Err(err) = substate_store.put(change) { @@ -1653,13 +1627,13 @@ where TConsensusSpec: ConsensusSpec warn!( target: LOG_TARGET, "❌ NO VOTE: Failed to store mint confidential output for {}. 
Error: {}", - atom.substate_id, + atom.commitment, err ); return Ok(Some(NoVoteReason::MintConfidentialOutputStoreFailed)); } - proposed_block_change_set.set_utxo_mint_proposed_in(utxo.substate_id); + proposed_block_change_set.set_utxo_mint_proposed_in(utxo.commitment); Ok(None) } @@ -1777,8 +1751,8 @@ where TConsensusSpec: ConsensusSpec atom.delete(tx)?; } - for atom in block.all_resume_nodes() { - atom.delete_suspended_node(tx)?; + for atom in block.all_evict_nodes() { + atom.mark_as_committed_in_epoch(tx, block.epoch())?; } // NOTE: this must happen before we commit the substate diff because the state transitions use this version diff --git a/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs b/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs index 8ecde5b9a..c9038eb27 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_foreign_proposal.rs @@ -123,11 +123,14 @@ where TConsensusSpec: ConsensusSpec fn validate_proposed_block( &self, - candidate_block: &Block, + _candidate_block: &Block, _foreign_shard: ShardGroup, _local_shard: ShardGroup, _foreign_receive_counter: &ForeignReceiveCounters, ) -> Result<(), ProposalValidationError> { + // TODO: validations specific to the foreign proposal. General block validations (signature etc) are already + // performed in on_message_validate. + // TODO: ignoring for now because this is currently broken // let Some(incoming_count) = candidate_block.get_foreign_counter(&local_shard) else { // debug!(target:LOG_TARGET, "Our bucket {local_shard:?} is missing reliability index in the proposed block @@ -149,24 +152,6 @@ where TConsensusSpec: ConsensusSpec // ), // }); // } - if candidate_block.is_genesis() { - return Err(ProposalValidationError::ProposingGenesisBlock { - proposed_by: candidate_block.proposed_by().to_string(), - hash: *candidate_block.id(), - }); - } - - let calculated_hash = candidate_block.calculate_hash().into(); - if calculated_hash != *candidate_block.id() { - return Err(ProposalValidationError::BlockIdMismatch { - proposed_by: candidate_block.proposed_by().to_string(), - block_id: *candidate_block.id(), - calculated_hash, - }); - } - - // TODO: validate justify signatures - // self.validate_qc(candidate_block.justify(), committee)?; Ok(()) } diff --git a/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs b/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs index 193850be7..baf435000 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_local_proposal.rs @@ -1,7 +1,10 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + mem, +}; use log::*; use tari_dan_common_types::{ @@ -35,6 +38,7 @@ use crate::{ calculate_dummy_blocks_from_justify, create_epoch_checkpoint, error::HotStuffError, + eviction_proof::generate_eviction_proofs, get_next_block_height_and_leader, on_ready_to_vote_on_local_block::OnReadyToVoteOnLocalBlock, on_receive_foreign_proposal::OnReceiveForeignProposalHandler, @@ -69,6 +73,7 @@ pub struct OnReceiveLocalProposalHandler { outbound_messaging: TConsensusSpec::OutboundMessaging, vote_signing_service: TConsensusSpec::SignatureService, on_receive_foreign_proposal: OnReceiveForeignProposalHandler, + tx_events: broadcast::Sender, hooks: TConsensusSpec::Hooks, } @@ -100,6 +105,7 @@ impl OnReceiveLocalProposalHandler 
OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler OnReceiveLocalProposalHandler NodeHeight(1) { + let num_dummies = candidate_block.height().as_u64() - justify_block.height().as_u64() - 1; + info!(target: LOG_TARGET, "🔨 Creating {} dummy block(s) for block {}", num_dummies, candidate_block); + let dummy_blocks = calculate_dummy_blocks_from_justify( &candidate_block, &justify_block, diff --git a/dan_layer/consensus/src/hotstuff/on_receive_new_view.rs b/dan_layer/consensus/src/hotstuff/on_receive_new_view.rs index b7f3c8ed4..019b83e5e 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_new_view.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_new_view.rs @@ -149,9 +149,14 @@ where TConsensusSpec: ConsensusSpec target: LOG_TARGET, "🔥 Receive VOTE with NEWVIEW for node {} {} from {}", vote.unverified_block_height, vote.block_id, from, ); - self.vote_collector + if let Err(err) = self + .vote_collector .check_and_collect_vote(from.clone(), current_epoch, vote, local_committee_info) - .await?; + .await + { + warn!(target: LOG_TARGET, "❌ Error handling vote: {}", err); + return Ok(()); + } } // Take note of unique NEWVIEWs so that we can count them diff --git a/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs b/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs index 44b684eb6..d49d6ee71 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_request_missing_transactions.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause use log::*; +use tari_dan_common_types::option::DisplayContainer; use tari_dan_storage::{consensus_models::TransactionRecord, StateStore}; use crate::{ @@ -41,7 +42,7 @@ where TConsensusSpec: ConsensusSpec if !missing.is_empty() { warn!( target: LOG_TARGET, - "Some requested transaction(s) not found: {}", missing.iter().map(|t| t.to_string()).collect::>().join(", ") + "Some requested transaction(s) not found: {}", missing.display() ) } diff --git a/dan_layer/consensus/src/hotstuff/on_receive_vote.rs b/dan_layer/consensus/src/hotstuff/on_receive_vote.rs index c595f17e5..95c720bc8 100644 --- a/dan_layer/consensus/src/hotstuff/on_receive_vote.rs +++ b/dan_layer/consensus/src/hotstuff/on_receive_vote.rs @@ -47,9 +47,9 @@ where TConsensusSpec: ConsensusSpec // self.pacemaker // .update_view(high_qc.epoch(), high_qc.block_height(), high_qc.block_height()) // .await?; - // Reset the block time and leader timeouts + // Reset the leader timeout (not the block timer) self.pacemaker.reset_leader_timeout(high_qc.block_height()).await?; - // If we reached quorum, trigger a check to see if we should propose + // We've reached quorum, trigger a check to see if we should propose immediately self.pacemaker.beat(); }, Ok(None) => {}, diff --git a/dan_layer/consensus/src/hotstuff/pacemaker.rs b/dan_layer/consensus/src/hotstuff/pacemaker.rs index 7c350b9ed..ef4beef9d 100644 --- a/dan_layer/consensus/src/hotstuff/pacemaker.rs +++ b/dan_layer/consensus/src/hotstuff/pacemaker.rs @@ -39,7 +39,7 @@ impl PaceMaker { let on_beat = OnBeat::new(); let on_force_beat = OnForceBeat::new(); let on_leader_timeout = OnLeaderTimeout::new(); - let current_height = CurrentView::new(); + let current_view = CurrentView::new(); Self { handle_receiver: receiver, @@ -48,9 +48,9 @@ impl 
PaceMaker { on_beat, on_force_beat, on_leader_timeout, - current_height.clone(), + current_view.clone(), ), - current_view: current_height, + current_view, current_high_qc_height: NodeHeight(0), block_time: max_base_time, } @@ -95,7 +95,7 @@ impl PaceMaker { maybe_req = self.handle_receiver.recv() => { if let Some(req) = maybe_req { match req { - PacemakerRequest::ResetLeaderTimeout { high_qc_height } => { + PacemakerRequest::Reset { high_qc_height, reset_block_time } => { if !started { continue; } @@ -103,11 +103,15 @@ impl PaceMaker { leader_failure_triggered_during_suspension = false; if let Some(height) = high_qc_height { - self.current_high_qc_height = height; + self.current_high_qc_height = height; } - info!(target: LOG_TARGET, "🧿 Pacemaker Reset! Current height: {}, Delta: {:.2?}", self.current_view, self.delta_time()); leader_timeout.as_mut().reset(self.leader_timeout()); - block_timer.as_mut().reset(self.block_time()); + if reset_block_time { + block_timer.as_mut().reset(self.block_time()); + info!(target: LOG_TARGET, "🧿 Pacemaker Reset! Current height: {}, Delta: {:.2?}", self.current_view, self.delta_time()); + } else { + info!(target: LOG_TARGET, "🧿 Pacemaker Leader timeout Reset! Current height: {}, Delta: {:.2?}", self.current_view, self.delta_time()); + } }, PacemakerRequest::Start { high_qc_height } => { info!(target: LOG_TARGET, "🚀 Starting pacemaker at leaf height {} and high QC: {}", self.current_view, high_qc_height); diff --git a/dan_layer/consensus/src/hotstuff/pacemaker_handle.rs b/dan_layer/consensus/src/hotstuff/pacemaker_handle.rs index 9ad72a423..de86794d8 100644 --- a/dan_layer/consensus/src/hotstuff/pacemaker_handle.rs +++ b/dan_layer/consensus/src/hotstuff/pacemaker_handle.rs @@ -13,8 +13,13 @@ use crate::hotstuff::{ }; pub enum PacemakerRequest { - ResetLeaderTimeout { high_qc_height: Option }, - Start { high_qc_height: NodeHeight }, + Reset { + high_qc_height: Option, + reset_block_time: bool, + }, + Start { + high_qc_height: NodeHeight, + }, Stop, SuspendLeaderFailure, ResumeLeaderFailure, @@ -92,8 +97,19 @@ impl PaceMakerHandle { pub async fn reset_leader_timeout(&self, high_qc_height: NodeHeight) -> Result<(), HotStuffError> { self.sender - .send(PacemakerRequest::ResetLeaderTimeout { + .send(PacemakerRequest::Reset { + high_qc_height: Some(high_qc_height), + reset_block_time: false, + }) + .await + .map_err(|e| HotStuffError::PacemakerChannelDropped { details: e.to_string() }) + } + + pub async fn reset(&self, high_qc_height: NodeHeight) -> Result<(), HotStuffError> { + self.sender + .send(PacemakerRequest::Reset { high_qc_height: Some(high_qc_height), + reset_block_time: true, }) .await .map_err(|e| HotStuffError::PacemakerChannelDropped { details: e.to_string() }) @@ -108,7 +124,7 @@ impl PaceMakerHandle { ) -> Result<(), HotStuffError> { // Update current height here to prevent possibility of race conditions self.current_view.update(epoch, last_seen_height); - self.reset_leader_timeout(high_qc_height).await + self.reset(high_qc_height).await } /// Suspend leader failure trigger. This should be called when a proposal is being processed. No leader failure will @@ -138,13 +154,13 @@ impl PaceMakerHandle { ) -> Result<(), HotStuffError> { // Update current height here to prevent possibility of race conditions self.current_view.reset(epoch, last_seen_height); - self.reset_leader_timeout(high_qc_height).await + self.reset(high_qc_height).await } /// Reset the leader timeout. This should be called when an end of epoch proposal has been committed. 
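// Editor's note: a simplified, dependency-free sketch of the reworked pacemaker request above.
// `PacemakerRequest::ResetLeaderTimeout` became a single `Reset` variant carrying a
// `reset_block_time` flag: `reset_leader_timeout()` sends `false` (only the leader-failure timer
// restarts) while the new `reset()` sends `true` (the block timer restarts too). The `u64` height
// and the printed "timers" stand in for NodeHeight and the real tokio timers.

#[derive(Debug)]
enum PacemakerRequest {
    Reset {
        high_qc_height: Option<u64>,
        reset_block_time: bool,
    },
}

fn reset_leader_timeout(high_qc_height: u64) -> PacemakerRequest {
    PacemakerRequest::Reset {
        high_qc_height: Some(high_qc_height),
        reset_block_time: false,
    }
}

fn reset(high_qc_height: u64) -> PacemakerRequest {
    PacemakerRequest::Reset {
        high_qc_height: Some(high_qc_height),
        reset_block_time: true,
    }
}

fn apply(req: PacemakerRequest) {
    match req {
        PacemakerRequest::Reset {
            high_qc_height,
            reset_block_time,
        } => {
            if let Some(height) = high_qc_height {
                println!("updating high QC height to {height}");
            }
            println!("restarting leader timeout");
            if reset_block_time {
                println!("restarting block timer as well");
            }
        },
    }
}

fn main() {
    // A vote reaching quorum only restarts the leader timeout...
    apply(reset_leader_timeout(65));
    // ...while processing a new proposal restarts both timers.
    apply(reset(66));
}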
pub async fn set_epoch(&self, epoch: Epoch) -> Result<(), HotStuffError> { self.current_view.reset(epoch, NodeHeight::zero()); - self.reset_leader_timeout(NodeHeight::zero()).await + self.reset(NodeHeight::zero()).await } pub fn current_view(&self) -> &CurrentView { diff --git a/dan_layer/consensus/src/hotstuff/state_machine/running.rs b/dan_layer/consensus/src/hotstuff/state_machine/running.rs index f1d39000a..3abf25f98 100644 --- a/dan_layer/consensus/src/hotstuff/state_machine/running.rs +++ b/dan_layer/consensus/src/hotstuff/state_machine/running.rs @@ -12,6 +12,7 @@ use crate::{ worker::ConsensusWorkerContext, }, HotStuffError, + ProposalValidationError, }, traits::ConsensusSpec, }; @@ -39,7 +40,8 @@ where TSpec: ConsensusSpec info!(target: LOG_TARGET, "Not registered for current epoch ({err})"); Ok(ConsensusStateEvent::NotRegisteredForEpoch { epoch }) }, - Err(err @ HotStuffError::FallenBehind { .. }) => { + Err(err @ HotStuffError::FallenBehind { .. }) | + Err(err @ HotStuffError::ProposalValidationError(ProposalValidationError::FutureEpoch { .. })) => { info!(target: LOG_TARGET, "⚠️ Behind peers, starting sync ({err})"); Ok(ConsensusStateEvent::NeedSync) }, diff --git a/dan_layer/consensus/src/hotstuff/state_machine/state.rs b/dan_layer/consensus/src/hotstuff/state_machine/state.rs index f24c8ccfd..26a6d6365 100644 --- a/dan_layer/consensus/src/hotstuff/state_machine/state.rs +++ b/dan_layer/consensus/src/hotstuff/state_machine/state.rs @@ -3,6 +3,8 @@ use std::fmt::Display; +use serde::Serialize; + use crate::hotstuff::state_machine::{check_sync::CheckSync, idle::Idle, running::Running, syncing::Syncing}; #[derive(Debug)] @@ -15,7 +17,7 @@ pub(super) enum ConsensusState { Shutdown, } -#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Copy, Default, Serialize)] pub enum ConsensusCurrentState { #[default] Idle, diff --git a/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs b/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs index c8bca6563..633c5a28b 100644 --- a/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs +++ b/dan_layer/consensus/src/hotstuff/substate_store/pending_store.rs @@ -118,6 +118,7 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> WriteableSubstateStore for PendingS for (id, version) in diff.down_iter() { let id = VersionedSubstateId::new(id.clone(), *version); let shard = id.to_substate_address().to_shard(self.num_preshards); + debug!(target: LOG_TARGET, "🔽️ Down: {id} {shard}"); self.put(SubstateChange::Down { id, shard, @@ -128,6 +129,7 @@ impl<'a, 'tx, TStore: StateStore + 'a + 'tx> WriteableSubstateStore for PendingS for (id, substate) in diff.up_iter() { let id = VersionedSubstateId::new(id.clone(), substate.version()); let shard = id.to_substate_address().to_shard(self.num_preshards); + debug!(target: LOG_TARGET, "🔼️ Up: {id} {shard} value hash: {}", substate.to_value_hash()); self.put(SubstateChange::Up { id, shard, diff --git a/dan_layer/consensus/src/hotstuff/transaction_manager/manager.rs b/dan_layer/consensus/src/hotstuff/transaction_manager/manager.rs index a1e94af61..8c4b2ae58 100644 --- a/dan_layer/consensus/src/hotstuff/transaction_manager/manager.rs +++ b/dan_layer/consensus/src/hotstuff/transaction_manager/manager.rs @@ -160,7 +160,7 @@ impl> let mut transaction = TransactionRecord::get(store.read_transaction(), &transaction_id)?; let mut outputs = HashSet::new(); outputs.insert(VersionedSubstateId::new( - TransactionReceiptAddress::from(transaction_id).into(), + 
TransactionReceiptAddress::from(transaction_id), 0, )); diff --git a/dan_layer/consensus/src/hotstuff/vote_collector.rs b/dan_layer/consensus/src/hotstuff/vote_collector.rs index c440cd80c..0d7aa6673 100644 --- a/dan_layer/consensus/src/hotstuff/vote_collector.rs +++ b/dan_layer/consensus/src/hotstuff/vote_collector.rs @@ -261,7 +261,8 @@ fn create_qc(vote_data: VoteData) -> QuorumCertificate { block, } = vote_data; QuorumCertificate::new( - *block.id(), + block.header().calculate_hash(), + *block.parent(), block.height(), block.epoch(), block.shard_group(), diff --git a/dan_layer/consensus/src/hotstuff/worker.rs b/dan_layer/consensus/src/hotstuff/worker.rs index fec9201d3..21f3e8d09 100644 --- a/dan_layer/consensus/src/hotstuff/worker.rs +++ b/dan_layer/consensus/src/hotstuff/worker.rs @@ -139,6 +139,7 @@ impl HotstuffWorker { config.clone(), state_store.clone(), epoch_manager.clone(), + pacemaker.clone_handle().current_view().clone(), leader_strategy.clone(), signing_service.clone(), outbound_messaging.clone(), @@ -240,6 +241,10 @@ impl HotstuffWorker { self.pacemaker .start(current_epoch, current_height, high_qc.block_height()) .await?; + self.publish_event(HotstuffEvent::EpochChanged { + epoch: current_epoch, + registered_shard_group: Some(local_committee_info.shard_group()), + }); let local_committee = self.epoch_manager.get_local_committee(current_epoch).await?; self.run(local_committee_info, local_committee).await?; @@ -318,7 +323,7 @@ impl HotstuffWorker { Some(result) = self.on_inbound_message.next_message(current_epoch, current_height) => { if let Err(e) = self.on_unvalidated_message(current_epoch, current_height, result, &local_committee_info, &local_committee).await { - self.on_failure("on_inbound_message", &e).await; + self.on_failure("on_unvalidated_message", &e).await; return Err(e); } }, @@ -434,6 +439,18 @@ impl HotstuffWorker { Ok(()) }, MessageValidationResult::Discard => Ok(()), + // In these cases, we want to propagate the error back to the state machine, to allow sync + MessageValidationResult::Invalid { + err: err @ HotStuffError::FallenBehind { .. }, + .. + } | + MessageValidationResult::Invalid { + err: err @ HotStuffError::ProposalValidationError(ProposalValidationError::FutureEpoch { .. }), + .. + } => { + self.hooks.on_error(&err); + Err(err) + }, MessageValidationResult::Invalid { err, from, message } => { self.hooks.on_error(&err); error!(target: LOG_TARGET, "🚨 Invalid new message from {from}: {err} - {message}"); @@ -547,6 +564,10 @@ impl HotstuffWorker { ); return Err(HotStuffError::NotRegisteredForCurrentEpoch { epoch }); } + info!( + target: LOG_TARGET, + "🌟 This validator is registered for epoch {}.", epoch + ); // Edge case: we have started a VN and have progressed a few epochs quickly and have no blocks in // previous epochs to update the current view. 
This only really applies when mining is diff --git a/dan_layer/consensus/src/lib.rs b/dan_layer/consensus/src/lib.rs index 125ef4825..9373f84f0 100644 --- a/dan_layer/consensus/src/lib.rs +++ b/dan_layer/consensus/src/lib.rs @@ -7,3 +7,6 @@ pub mod hotstuff; pub mod messages; mod tracing; pub mod traits; + +#[cfg(test)] +mod test_helpers; diff --git a/dan_layer/consensus/src/messages/message.rs b/dan_layer/consensus/src/messages/message.rs index 36bb016ab..f920df8df 100644 --- a/dan_layer/consensus/src/messages/message.rs +++ b/dan_layer/consensus/src/messages/message.rs @@ -64,8 +64,9 @@ impl Display for HotstuffMessage { HotstuffMessage::NewView(msg) => { write!( f, - "NewView({}, high-qc: {})", + "NewView({}, {}, high-qc: {})", msg.new_height, + msg.high_qc.epoch(), msg.high_qc.block_height() ) }, @@ -81,8 +82,8 @@ impl Display for HotstuffMessage { HotstuffMessage::ForeignProposal(msg) => write!(f, "ForeignProposal({})", msg), HotstuffMessage::Vote(msg) => write!( f, - "Vote({}, {}, {})", - msg.unverified_block_height, msg.block_id, msg.decision + "Vote({}, {}, {}, {})", + msg.unverified_block_height, msg.epoch, msg.block_id, msg.decision, ), HotstuffMessage::MissingTransactionsRequest(msg) => { write!( @@ -101,7 +102,9 @@ impl Display for HotstuffMessage { msg.epoch ), HotstuffMessage::CatchUpSyncRequest(msg) => write!(f, "SyncRequest({})", msg.high_qc), - HotstuffMessage::SyncResponse(msg) => write!(f, "SyncResponse({} block(s))", msg.blocks.len()), + HotstuffMessage::SyncResponse(msg) => { + write!(f, "SyncResponse({}, {} block(s))", msg.epoch, msg.blocks.len()) + }, } } } diff --git a/dan_layer/consensus/src/test_helpers.rs b/dan_layer/consensus/src/test_helpers.rs new file mode 100644 index 000000000..32828724d --- /dev/null +++ b/dan_layer/consensus/src/test_helpers.rs @@ -0,0 +1,12 @@ +// Copyright 2024 The Tari Project +// SPDX-License-Identifier: BSD-3-Clause + +use serde::de::DeserializeOwned; + +pub fn load_fixture(name: &str) -> T { + let path = format!("tests/fixtures/{name}"); + let file = std::fs::File::open(&path).unwrap_or_else(|_| { + panic!("Could not open fixture file at path: {path}"); + }); + serde_json::from_reader(file).unwrap() +} diff --git a/dan_layer/consensus/src/traits/messaging.rs b/dan_layer/consensus/src/traits/messaging.rs index f3fb1f056..87126ecec 100644 --- a/dan_layer/consensus/src/traits/messaging.rs +++ b/dan_layer/consensus/src/traits/messaging.rs @@ -20,34 +20,41 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use async_trait::async_trait; +use std::future::Future; + use tari_dan_common_types::{NodeAddressable, ShardGroup}; use crate::messages::HotstuffMessage; -#[async_trait] pub trait OutboundMessaging { - type Addr: NodeAddressable + Send; + type Addr: NodeAddressable + Send + 'static; - async fn send_self + Send>(&mut self, message: T) -> Result<(), OutboundMessagingError>; + fn send_self + Send>( + &mut self, + message: T, + ) -> impl Future> + Send; - async fn send + Send>( + fn send + Send>( &mut self, to: Self::Addr, message: T, - ) -> Result<(), OutboundMessagingError>; + ) -> impl Future> + Send; - async fn multicast<'a, T>(&mut self, shard_group: ShardGroup, message: T) -> Result<(), OutboundMessagingError> + fn multicast( + &mut self, + shard_group: ShardGroup, + message: T, + ) -> impl Future> + Send where - Self::Addr: 'a, T: Into + Send; } -#[async_trait] pub trait InboundMessaging { type Addr: NodeAddressable + Send; - async fn next_message(&mut self) -> Option>; + fn next_message( + &mut self, + ) -> impl Future>> + Send; } #[derive(Debug, thiserror::Error)] diff --git a/dan_layer/consensus/src/traits/signing_service.rs b/dan_layer/consensus/src/traits/signing_service.rs index e2416a98d..a162f5741 100644 --- a/dan_layer/consensus/src/traits/signing_service.rs +++ b/dan_layer/consensus/src/traits/signing_service.rs @@ -13,7 +13,11 @@ pub trait ValidatorSignatureService { pub trait VoteSignatureService: ValidatorSignatureService { fn create_message(&self, block_id: &BlockId, decision: &QuorumDecision) -> FixedHash { - vote_signature_hasher().chain(block_id).chain(decision).result() + vote_signature_hasher() + .chain(block_id) + .chain(decision) + .finalize() + .into() } fn sign_vote(&self, block_id: &BlockId, decision: &QuorumDecision) -> ValidatorSignature { diff --git a/dan_layer/consensus/tests/fixtures/block.json b/dan_layer/consensus/tests/fixtures/block.json new file mode 100644 index 000000000..8ed242d5e --- /dev/null +++ b/dan_layer/consensus/tests/fixtures/block.json @@ -0,0 +1,237 @@ +{ + "header": { + "id": "1cdbe5c1a894bcc254b47cf017d4d17608839b7048d1c02162bccd39e7635288", + "network": "localnet", + "parent": "60e452539750d7d9c628f0b729f5a4ffb6a3f4343be734b230cf513bb21dbb53", + "justify_id": "5b8092cd68b5df33b27aeb2830a2a9a951f994536b9a22803bb567e08640e7c6", + "height": 66, + "epoch": 5, + "shard_group": { + "start": 0, + "end_inclusive": 255 + }, + "proposed_by": "5e13c16840aa8d2e7e68390d0eb1b45c86bc363db0419f7ec5daa534e63bdb35", + "total_leader_fee": 0, + "state_merkle_root": [ + 247, + 115, + 121, + 67, + 152, + 239, + 4, + 213, + 134, + 205, + 188, + 237, + 238, + 135, + 251, + 0, + 201, + 163, + 1, + 210, + 90, + 179, + 146, + 172, + 69, + 197, + 43, + 40, + 1, + 120, + 253, + 141 + ], + "command_merkle_root": [ + 231, + 9, + 182, + 13, + 242, + 171, + 86, + 197, + 164, + 182, + 2, + 28, + 201, + 247, + 92, + 220, + 138, + 31, + 216, + 74, + 157, + 216, + 74, + 61, + 1, + 17, + 255, + 134, + 70, + 63, + 96, + 59 + ], + "is_dummy": false, + "foreign_indexes": {}, + "signature": { + "public_nonce": "2846178a36d464460afc23c230cd5b5ed537fc214126e0ed4a2c972580068828", + "signature": "16f4be46fb4c5736e6b5be0771bf5e108c254a8cf28b2c407b089f466ec73305" + }, + "timestamp": 1732873628, + "base_layer_block_height": 50, + "base_layer_block_hash": [ + 25, + 244, + 181, + 192, + 179, + 56, + 148, + 2, + 152, + 84, + 182, + 233, + 186, + 146, + 40, + 18, + 183, + 41, + 1, + 114, + 211, + 244, + 142, + 109, + 211, + 160, + 8, + 20, + 68, + 17, + 58, + 112 + ], + "extra_data": {} 
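// Editor's note: the messaging traits above move from `#[async_trait]` to "return position impl
// Trait in trait" (RPITIT), i.e. each method returns `impl Future<Output = …> + Send` instead of a
// boxed future. This is a dependency-light sketch of the same pattern with toy types;
// `LoggingOutbound`, `ToyMessage` and the use of `futures::executor::block_on` are illustrative
// assumptions, not the crate's real implementations.

use std::future::Future;

#[derive(Debug, Clone)]
struct ToyMessage(String);

trait OutboundMessaging {
    type Addr: std::fmt::Display + Send + 'static;

    // No #[async_trait], no boxed future: the concrete future type is left to the implementer,
    // and `+ Send` keeps it usable from multi-threaded executors.
    fn send<T: Into<ToyMessage> + Send>(
        &mut self,
        to: Self::Addr,
        message: T,
    ) -> impl Future<Output = Result<(), String>> + Send;
}

struct LoggingOutbound;

impl OutboundMessaging for LoggingOutbound {
    type Addr = String;

    fn send<T: Into<ToyMessage> + Send>(
        &mut self,
        to: Self::Addr,
        message: T,
    ) -> impl Future<Output = Result<(), String>> + Send {
        let message = message.into();
        // An async block is itself an `impl Future`, so implementations can still be written in
        // async style.
        async move {
            println!("sending {message:?} to {to}");
            Ok::<(), String>(())
        }
    }
}

fn main() {
    let mut outbound = LoggingOutbound;
    // Drive the returned future to completion on the current thread.
    futures::executor::block_on(outbound.send("validator-2".to_string(), ToyMessage("NewView".into())))
        .expect("send failed");
}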
+ }, + "justify": { + "qc_id": "5b8092cd68b5df33b27aeb2830a2a9a951f994536b9a22803bb567e08640e7c6", + "block_id": "60e452539750d7d9c628f0b729f5a4ffb6a3f4343be734b230cf513bb21dbb53", + "header_hash": [ + 169, + 8, + 134, + 26, + 74, + 77, + 45, + 43, + 228, + 253, + 24, + 19, + 82, + 251, + 73, + 146, + 73, + 88, + 12, + 6, + 108, + 174, + 118, + 27, + 159, + 7, + 75, + 128, + 104, + 233, + 19, + 122 + ], + "parent_id": "75cd035fcf70444dfcd547f76838a8168c2e48bc7fd1217be05fcb28c4bf25dd", + "block_height": 65, + "epoch": 5, + "shard_group": { + "start": 0, + "end_inclusive": 255 + }, + "signatures": [ + { + "public_key": "5e13c16840aa8d2e7e68390d0eb1b45c86bc363db0419f7ec5daa534e63bdb35", + "signature": { + "public_nonce": "cabbbf993a27e0b55bb5bf80d24b49d70dc4199658d145a0f96ec96dc61a0e7f", + "signature": "34f7a9a5d97da003b0793a95277386146d2ce008b1e62e33ba27c93d8e376e03" + } + }, + { + "public_key": "048c457bddf93adc0cc7c8a934437cacb77998c2469bb2028b83d4e1d2ea344e", + "signature": { + "public_nonce": "aadeb13bfc9f164f06234e480dbdb243d87088e3b20d78da526ed2ca98ff9154", + "signature": "97c24ea6fb1f086b8dc403fc7edbcc0e785277e55e8d3416f20e667be8f8710a" + } + }, + { + "public_key": "f02cc332c74ac694bcfd1740896bae20d7d7d2676f1a5ae8ad9bce21f8156444", + "signature": { + "public_nonce": "9256b1b65e8abc1860fb960f97941e6fbe07710c09a56575493c97bf32d9a823", + "signature": "6c08e68e35dfd40d5a81d85b4f66c7ec631d8f2ea6ee7141228c3efcf4a99301" + } + }, + { + "public_key": "d6915bb1c499c289cdc78a0170e9649c1bc3e3534fa864cd3a6953f05a067e28", + "signature": { + "public_nonce": "8aaa7341a6de2dbcad79f9dd48f4e8da965d7aa9c45ee81a0517dabdeb087f0b", + "signature": "e796cb3da75a9ec9b6a6390f274e9260d584e021fa8b36a1cc9a58d9d72fc806" + } + }, + { + "public_key": "c8939413cbd8df92f65d7f9fd8316a55ca5e333e22a07f8041831a43eb20357b", + "signature": { + "public_nonce": "9ea60805e9e96686be87eaaed49b4ed1b2d41f263e81c76ba3353a29110bca50", + "signature": "e15c31cb1118cec1e9427fdabbb54fa5579b87f93e54493c6febcd620dc71a0e" + } + } + ], + "leaf_hashes": [ + "1a587fa378749abb7d246957dab8cd4b3325ab1270ca2eeeb1f41c575e3fe2e4", + "2ba1515d0b9ea4da1232672a9c1b0cf73f36dc82c96e74fb293f1d18365dda2f", + "4cd271e84e16899aee3ef451afcb00b1f141ff9b0aadaae61b63bf9d4fd7fb7f", + "506b980c526a79f8755acae1596917762ba171de32d1dbbcc576fcfae55e7619", + "6f1e1088364ee89c45a01cab358e55158fdb1eaf5f28702059c9316712100880" + ], + "decision": "Accept", + "is_shares_processed": true + }, + "commands": [ + { + "EvictNode": { + "public_key": "fac00ae089a2a49052fb232926ff88a0be1e3587ff0ed0620b55d83b53f05c31" + } + } + ], + "is_justified": true, + "is_committed": true, + "block_time": 9, + "stored_at": [ + 2024, + 334, + 9, + 47, + 8, + 0 + ] +} + diff --git a/dan_layer/consensus_tests/Cargo.toml b/dan_layer/consensus_tests/Cargo.toml index a76d2e791..f44564edb 100644 --- a/dan_layer/consensus_tests/Cargo.toml +++ b/dan_layer/consensus_tests/Cargo.toml @@ -22,21 +22,19 @@ tari_dan_engine = { workspace = true } tari_engine_types = { workspace = true } tari_epoch_manager = { workspace = true } tari_template_lib = { workspace = true } +tari_sidechain = { workspace = true } tari_common_types = { workspace = true } -tari_mmr = { workspace = true } tari_shutdown = { workspace = true } tari_crypto = { workspace = true } -anyhow = { workspace = true } async-trait = { workspace = true } log = { workspace = true } serde = { workspace = true, default-features = true } -thiserror = { workspace = true } tokio = { workspace = true, default-features = false, features = ["sync", 
"rt-multi-thread"] } rand = { workspace = true } futures = { workspace = true } fern = { workspace = true } humantime = { workspace = true } -indexmap = { workspace = true } itertools = { workspace = true } +serde_json = { workspace = true } diff --git a/dan_layer/consensus_tests/src/consensus.rs b/dan_layer/consensus_tests/src/consensus.rs index 8b7213f7b..028cbb4d2 100644 --- a/dan_layer/consensus_tests/src/consensus.rs +++ b/dan_layer/consensus_tests/src/consensus.rs @@ -918,8 +918,9 @@ async fn leader_failure_node_goes_down() { // Allow enough time for leader failures .with_test_timeout(Duration::from_secs(60)) .modify_consensus_constants(|config_mut| { - // Prevent suspends + // Prevent evictions config_mut.missed_proposal_suspend_threshold = 10; + config_mut.missed_proposal_evict_threshold = 10; config_mut.pacemaker_block_time = Duration::from_secs(2); }) .add_committee(0, vec!["1", "2", "3", "4", "5"]) @@ -976,91 +977,6 @@ async fn leader_failure_node_goes_down() { test.assert_clean_shutdown_except(&[failure_node]).await; } -#[tokio::test(flavor = "multi_thread", worker_threads = 4)] -async fn leader_failure_node_goes_down_and_gets_suspended() { - setup_logger(); - let failure_node = TestAddress::new("4"); - - let mut test = Test::builder() - // Allow enough time for leader failures - .with_test_timeout(Duration::from_secs(30)) - .modify_consensus_constants(|config_mut| { - // The node will be suspended after one missed proposal - config_mut.missed_proposal_suspend_threshold = 2; - config_mut.pacemaker_block_time = Duration::from_secs(5); - }) - .add_committee(0, vec!["1", "2", "3", "4", "5"]) - .add_failure_node(failure_node.clone()) - .start() - .await; - - for _ in 0..10 { - test.send_transaction_to_all(Decision::Commit, 1, 2, 1).await; - } - - // Take the VN offline - if we do it in the loop below, all transactions may have already been finalized (local - // only) by committed block 1 - log::info!("😴 {failure_node} is offline"); - test.network() - .go_offline(TestVnDestination::Address(failure_node.clone())) - .await; - - test.start_epoch(Epoch(1)).await; - - loop { - let (_, _, _, committed_height) = test.on_block_committed().await; - - // Takes missed_proposal_suspend_threshold * 5 + 3 blocks for nodes to suspend. So we need to keep the - // transactions coming to speed up this test. 
- if committed_height >= NodeHeight(1) && committed_height < NodeHeight(20) { - // This allows a few more leader failures to occur - test.send_transaction_to_all(Decision::Commit, 1, 2, 1).await; - // test.wait_for_pool_count(TestVnDestination::All, 1).await; - } - - // if test.validators_iter().filter(|vn| vn.address != failure_node).all(|v| { - // let c = v.get_transaction_pool_count(); - // log::info!("{} has {} transactions in pool", v.address, c); - // c == 0 - // }) { - // break; - // } - - if committed_height >= NodeHeight(20) { - break; - // panic!("Not all transaction committed after {} blocks", committed_height); - } - } - - test.assert_all_validators_at_same_height_except(&[failure_node.clone()]) - .await; - - test.validators_iter() - .filter(|vn| vn.address != failure_node) - .for_each(|v| { - assert!(v.has_committed_substates(), "Validator {} did not commit", v.address); - }); - - let (_, suspended_public_key) = helpers::derive_keypair_from_address(&failure_node); - test.validators() - .get(&TestAddress::new("1")) - .unwrap() - .state_store() - .with_read_tx(|tx| { - let leaf = tx.leaf_block_get(Epoch(1))?; - assert!( - tx.suspended_nodes_is_suspended(leaf.block_id(), &suspended_public_key) - .unwrap(), - "{failure_node} is not suspended" - ); - Ok::<_, HotStuffError>(()) - }) - .unwrap(); - - log::info!("total messages sent: {}", test.network().total_messages_sent()); - test.assert_clean_shutdown_except(&[failure_node]).await; -} - #[tokio::test(flavor = "multi_thread", worker_threads = 4)] async fn foreign_block_distribution() { setup_logger(); @@ -1295,3 +1211,167 @@ async fn multi_shard_unversioned_input_conflict() { test.assert_clean_shutdown().await; log::info!("total messages sent: {}", test.network().total_messages_sent()); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +async fn leader_failure_node_goes_down_and_gets_evicted() { + setup_logger(); + let failure_node = TestAddress::new("4"); + + let mut test = Test::builder() + // Allow enough time for leader failures + .with_test_timeout(Duration::from_secs(30)) + .modify_consensus_constants(|config_mut| { + // The node will be evicted after three missed proposals + config_mut.missed_proposal_suspend_threshold = 1; + config_mut.missed_proposal_evict_threshold = 3; + config_mut.pacemaker_block_time = Duration::from_secs(5); + }) + .add_committee(0, vec!["1", "2", "3", "4", "5"]) + .add_failure_node(failure_node.clone()) + .start() + .await; + + for _ in 0..10 { + test.send_transaction_to_all(Decision::Commit, 1, 2, 1).await; + } + + // Take the VN offline - if we do it in the loop below, all transactions may have already been finalized (local + // only) by committed block 1 + log::info!("😴 {failure_node} is offline"); + test.network() + .go_offline(TestVnDestination::Address(failure_node.clone())) + .await; + + test.start_epoch(Epoch(1)).await; + + loop { + let (_, _, _, committed_height) = test.on_block_committed().await; + + // Takes missed_proposal_evict_threshold * 5 (members) + 3 (chain) blocks for nodes to evict. So we need to keep + // the transactions coming to speed up this test. 
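// Editor's note: a back-of-the-envelope check of the timing comment in the eviction test above.
// With `missed_proposal_evict_threshold = 3` and a 5-member committee, round-robin leader rotation
// means the offline node misses a proposal roughly once every 5 blocks, plus about 3 blocks for the
// EvictNode command to be proposed and committed through the chain. The formula is the test
// comment's rule of thumb, not an exact protocol guarantee.

fn main() {
    let missed_proposal_evict_threshold: u64 = 3;
    let committee_size: u64 = 5;
    let commit_lag: u64 = 3; // blocks for the EvictNode command to be finalized

    let expected_blocks = missed_proposal_evict_threshold * committee_size + commit_lag;
    println!("eviction expected after roughly {expected_blocks} blocks"); // ~18

    // The test keeps feeding transactions until height 20 and only panics after height 40, which
    // comfortably covers this estimate.
    assert!(expected_blocks <= 20);
}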
+ if committed_height >= NodeHeight(1) && committed_height < NodeHeight(20) { + // This allows a few more leader failures to occur + test.send_transaction_to_all(Decision::Commit, 1, 2, 1).await; + } + + let eviction_proofs = test + .validators() + .get(&TestAddress::new("1")) + .unwrap() + .epoch_manager() + .eviction_proofs() + .await; + if !eviction_proofs.is_empty() { + break; + } + + if committed_height >= NodeHeight(40) { + panic!("Not all transaction committed after {} blocks", committed_height); + } + } + + test.assert_all_validators_at_same_height_except(&[failure_node.clone()]) + .await; + + test.validators_iter() + .filter(|vn| vn.address != failure_node) + .for_each(|v| { + assert!(v.has_committed_substates(), "Validator {} did not commit", v.address); + }); + + let (_, failure_node_pk) = helpers::derive_keypair_from_address(&failure_node); + test.validators() + .get(&TestAddress::new("1")) + .unwrap() + .state_store() + .with_read_tx(|tx| { + let leaf = tx.leaf_block_get(Epoch(1))?; + assert!( + tx.suspended_nodes_is_evicted(leaf.block_id(), &failure_node_pk) + .unwrap(), + "{failure_node} is not evicted" + ); + Ok::<_, HotStuffError>(()) + }) + .unwrap(); + + let eviction_proofs = test + .validators() + .get(&TestAddress::new("1")) + .unwrap() + .epoch_manager() + .eviction_proofs() + .await; + for proof in &eviction_proofs { + assert_eq!(proof.node_to_evict(), &failure_node_pk); + } + + // Epoch manager state is shared between all validators, so each working validator (4) should create a proof. + assert_eq!(eviction_proofs.len(), 4); + + log::info!("total messages sent: {}", test.network().total_messages_sent()); + test.assert_clean_shutdown_except(&[failure_node]).await; +} + +// mod dump_data { +// use super::*; +// use std::fs::File; +// use tari_crypto::tari_utilities::hex::from_hex; +// use tari_consensus::hotstuff::eviction_proof::convert_block_to_sidechain_block_header; +// use tari_state_store_sqlite::SqliteStateStore; +// +// fn asd() { +// let store = SqliteStateStore::::connect( +// "data/swarm/processes/validator-node-01/localnet/data/validator_node/state.db", +// ) +// .unwrap(); +// let p = store +// .with_read_tx(|tx| { +// let block = tari_dan_storage::consensus_models::Block::get( +// tx, +// &BlockId::try_from( +// from_hex("891d186d2d46b990cc0974dc68734f701eaeb418a1bba487de93905d3986e0e3").unwrap(), +// ) +// .unwrap(), +// )?; +// +// let commit_block = tari_dan_storage::consensus_models::Block::get( +// tx, +// &BlockId::try_from( +// from_hex("1cdbe5c1a894bcc254b47cf017d4d17608839b7048d1c02162bccd39e7635288").unwrap(), +// ) +// .unwrap(), +// ) +// .unwrap(); +// +// let mut p = tari_consensus::hotstuff::eviction_proof::generate_eviction_proofs(tx, +// block.justify(), &[ commit_block.clone(), +// ]) +// .unwrap(); +// +// eprintln!(); +// eprintln!("{}", serde_json::to_string_pretty(&commit_block).unwrap()); +// eprintln!(); +// eprintln!(); +// +// let h = convert_block_to_sidechain_block_header(commit_block.header()); +// +// assert_eq!(h.calculate_hash(), commit_block.header().calculate_hash()); +// let b = p[0].proof().header().calculate_block_id(); +// assert_eq!( +// p[0].proof().header().calculate_hash(), +// commit_block.header().calculate_hash() +// ); +// assert_eq!(b, *commit_block.id().hash()); +// Ok::<_, HotStuffError>(p.remove(0)) +// }) +// .unwrap(); +// let f = File::options() +// .create(true) +// .write(true) +// .truncate(true) +// .open("/tmp/eviction_proof.json") +// .unwrap(); +// serde_json::to_writer_pretty(f, &p).unwrap(); 
+// } +// } diff --git a/dan_layer/consensus_tests/src/substate_store.rs b/dan_layer/consensus_tests/src/substate_store.rs index 78c8959f7..2306190a9 100644 --- a/dan_layer/consensus_tests/src/substate_store.rs +++ b/dan_layer/consensus_tests/src/substate_store.rs @@ -1,13 +1,21 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use tari_common::configuration::Network; use tari_consensus::{ hotstuff::substate_store::{LockFailedError, PendingSubstateStore, SubstateStoreError}, traits::{ReadableSubstateStore, WriteableSubstateStore}, }; -use tari_dan_common_types::{shard::Shard, NodeAddressable, PeerAddress, SubstateLockType, VersionedSubstateId}; +use tari_dan_common_types::{ + shard::Shard, + NodeAddressable, + NumPreshards, + PeerAddress, + SubstateLockType, + VersionedSubstateId, +}; use tari_dan_storage::{ - consensus_models::{BlockId, QcId, SubstateChange, SubstateRecord, SubstateRequirementLockIntent}, + consensus_models::{Block, BlockId, QcId, SubstateChange, SubstateRecord, SubstateRequirementLockIntent}, StateStore, }; use tari_engine_types::{ @@ -220,7 +228,15 @@ fn add_substate(store: &TestStore, seed: u8, version: u32) -> VersionedSubstateI } fn create_store() -> TestStore { - SqliteStateStore::connect(":memory:").unwrap() + let store = SqliteStateStore::connect(":memory:").unwrap(); + store + .with_write_tx(|tx| { + let zero = Block::zero_block(Network::LocalNet, NumPreshards::P256); + zero.justify().insert(tx)?; + zero.insert(tx) + }) + .unwrap(); + store } fn create_pending_store<'a, 'tx, TAddr: NodeAddressable>( diff --git a/dan_layer/consensus_tests/src/support/epoch_manager.rs b/dan_layer/consensus_tests/src/support/epoch_manager.rs index 9ce5171bb..d07b88fed 100644 --- a/dan_layer/consensus_tests/src/support/epoch_manager.rs +++ b/dan_layer/consensus_tests/src/support/epoch_manager.rs @@ -64,20 +64,18 @@ impl TestEpochManager { address, public_key, shard_key, - registered_at_base_height: our_validator_node.registered_at_base_height, start_epoch: our_validator_node.start_epoch, + end_epoch: None, fee_claim_public_key: PublicKey::default(), - sidechain_id: None, }); } else { copy.our_validator_node = Some(ValidatorNode { address, public_key, shard_key, - registered_at_base_height: 0, start_epoch: Epoch(0), + end_epoch: None, fee_claim_public_key: PublicKey::default(), - sidechain_id: None, }); } copy @@ -92,12 +90,15 @@ impl TestEpochManager { state.validator_shards.insert( address.clone(), ( + ValidatorNode { + address: address.clone(), + public_key: pk.clone(), + shard_key: substate_id.to_substate_address(), + start_epoch: Epoch(0), + end_epoch: None, + fee_claim_public_key: Default::default(), + }, shard_group, - substate_id.to_substate_address(), - pk.clone(), - None, - 0, - Epoch(0), ), ); state.address_shard.insert(address.clone(), shard_group); @@ -107,28 +108,8 @@ impl TestEpochManager { } } - pub async fn all_validators(&self) -> Vec<(TestAddress, ShardGroup, SubstateAddress, PublicKey, u64, Epoch)> { - self.state_lock() - .await - .validator_shards - .iter() - .filter_map( - |(a, (shard, substate_address, pk, sidechain_id, registered_at, start_epoch))| { - if sidechain_id.is_none() { - Some(( - a.clone(), - *shard, - *substate_address, - pk.clone(), - *registered_at, - *start_epoch, - )) - } else { - None - } - }, - ) - .collect() + pub async fn all_validators(&self) -> Vec<(ValidatorNode, ShardGroup)> { + self.state_lock().await.validator_shards.values().cloned().collect() } pub async fn all_committees(&self) -> HashMap> { @@ -138,6 
+119,10 @@ impl TestEpochManager { pub fn get_current_epoch(&self) -> Epoch { self.current_epoch } + + pub async fn eviction_proofs(&self) -> Vec { + self.state_lock().await.eviction_proofs.clone() + } } #[async_trait] @@ -167,18 +152,8 @@ impl EpochManagerReader for TestEpochManager { _epoch: Epoch, addr: &Self::Addr, ) -> Result, EpochManagerError> { - let (_shard, shard_key, public_key, sidechain_id, registered_at_base_height, start_epoch) = - self.state_lock().await.validator_shards[addr].clone(); - - Ok(ValidatorNode { - address: addr.clone(), - public_key, - shard_key, - registered_at_base_height, - start_epoch, - fee_claim_public_key: PublicKey::default(), - sidechain_id, - }) + let (vn, _) = self.state_lock().await.validator_shards[addr].clone(); + Ok(vn) } async fn get_all_validator_nodes( @@ -248,17 +223,32 @@ impl EpochManagerReader for TestEpochManager { todo!() } - async fn get_committees_by_shard_group( + async fn get_committee_by_shard_group( &self, _epoch: Epoch, shard_group: ShardGroup, - ) -> Result>, EpochManagerError> { + ) -> Result, EpochManagerError> { let state = self.state_lock().await; let Some(committee) = state.committees.get(&shard_group) else { panic!("Committee not found for shard group {}", shard_group); }; - Ok(Some((shard_group, committee.clone())).into_iter().collect()) + Ok(committee.clone()) + } + + async fn get_committees_overlapping_shard_group( + &self, + _epoch: Epoch, + shard_group: ShardGroup, + ) -> Result>, EpochManagerError> { + let state = self.state_lock().await; + let mut committees = HashMap::new(); + for (sg, committee) in &state.committees { + if sg.overlaps_shard_group(&shard_group) { + committees.insert(*sg, committee.clone()); + } + } + Ok(committees) } async fn get_committee_info_for_substate( @@ -311,20 +301,19 @@ impl EpochManagerReader for TestEpochManager { public_key: PublicKey, ) -> Result, EpochManagerError> { let lock = self.state_lock().await; - let (address, (_shard, shard_key, public_key, sidechain_id, registered_at, start_epoch)) = lock + let (vn, _) = lock .validator_shards - .iter() - .find(|(_, (_, _, pk, _, _, _))| *pk == public_key) + .values() + .find(|(vn, _)| vn.public_key == public_key) .unwrap(); Ok(ValidatorNode { - address: address.clone(), - public_key: public_key.clone(), - shard_key: *shard_key, - registered_at_base_height: *registered_at, - start_epoch: *start_epoch, - fee_claim_public_key: PublicKey::default(), - sidechain_id: sidechain_id.clone(), + address: vn.address.clone(), + public_key: vn.public_key.clone(), + shard_key: vn.shard_key, + start_epoch: vn.start_epoch, + end_epoch: vn.end_epoch, + fee_claim_public_key: vn.fee_claim_public_key.clone(), }) } @@ -336,6 +325,15 @@ impl EpochManagerReader for TestEpochManager { // Scanning is not relevant to tests Ok(()) } + + async fn add_intent_to_evict_validator( + &self, + proof: tari_sidechain::EvictionProof, + ) -> Result<(), EpochManagerError> { + let mut state = self.state_lock().await; + state.eviction_proofs.push(proof); + Ok(()) + } } #[derive(Debug, Clone)] @@ -345,9 +343,10 @@ pub struct TestEpochManagerState { pub last_block_of_current_epoch: FixedHash, pub is_epoch_active: bool, #[allow(clippy::type_complexity)] - pub validator_shards: HashMap, u64, Epoch)>, + pub validator_shards: HashMap, ShardGroup)>, pub committees: HashMap>, pub address_shard: HashMap, + pub eviction_proofs: Vec, } impl Default for TestEpochManagerState { @@ -360,6 +359,7 @@ impl Default for TestEpochManagerState { is_epoch_active: false, committees: HashMap::new(), 
address_shard: HashMap::new(), + eviction_proofs: Vec::new(), } } } diff --git a/dan_layer/consensus_tests/src/support/harness.rs b/dan_layer/consensus_tests/src/support/harness.rs index 48d60919c..fe9401498 100644 --- a/dan_layer/consensus_tests/src/support/harness.rs +++ b/dan_layer/consensus_tests/src/support/harness.rs @@ -424,9 +424,8 @@ impl Test { for (addr, block) in blocks { if (first.epoch() != block.epoch() || first.height() != block.height()) && attempts < 5 { attempts += 1; - // Send this task to the back of the queue and try again after other tasks have executed - // to allow validators to catch up - task::yield_now().await; + // Allow validators to catch up + tokio::time::sleep(Duration::from_millis(10)).await; continue 'outer; } assert_eq!( @@ -610,28 +609,28 @@ impl TestBuilder { .await .into_iter() // Dont start failed nodes - .filter(|(addr, _, _, pk, _, _)| { - if failure_nodes.contains(addr) { - log::info!("❗️ {addr} {pk} is a failure node and will not be spawned"); + .filter(|(vn, _)| { + if failure_nodes.contains(&vn.address) { + log::info!("❗️ {} {} is a failure node and will not be spawned", vn.address, vn.public_key); return false; } true }) - .map(|(address, shard_group, shard_addr, _, _, _)| { - let sql_address = sql_address.replace("{}", &address.0); - let (sk, pk) = helpers::derive_keypair_from_address(&address); + .map(|(vn, shard_group)| { + let sql_address = sql_address.replace("{}", &vn.address.0); + let (sk, pk) = helpers::derive_keypair_from_address(&vn.address); let (channels, validator) = Validator::builder() .with_sql_url(sql_address) .with_config(config.clone()) - .with_address_and_secret_key(address.clone(), sk) - .with_shard(shard_addr) + .with_address_and_secret_key(vn.address.clone(), sk) + .with_shard(vn.shard_key) .with_shard_group(shard_group) - .with_epoch_manager(epoch_manager.clone_for(address.clone(), pk, shard_addr)) + .with_epoch_manager(epoch_manager.clone_for(vn.address.clone(), pk, vn.shard_key)) .with_leader_strategy(*leader_strategy) .with_num_committees(num_committees) .spawn(shutdown_signal.clone()); - (channels, (address, validator)) + (channels, (vn.address, validator)) }) .unzip() } diff --git a/dan_layer/consensus_tests/src/support/messaging_impls.rs b/dan_layer/consensus_tests/src/support/messaging_impls.rs index be0cec58b..ceaf9c83c 100644 --- a/dan_layer/consensus_tests/src/support/messaging_impls.rs +++ b/dan_layer/consensus_tests/src/support/messaging_impls.rs @@ -1,7 +1,6 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use async_trait::async_trait; use tari_consensus::{ messages::HotstuffMessage, traits::{InboundMessaging, InboundMessagingError, OutboundMessaging, OutboundMessagingError}, @@ -40,7 +39,6 @@ impl TestOutboundMessaging { } } -#[async_trait] impl OutboundMessaging for TestOutboundMessaging { type Addr = TestAddress; @@ -66,23 +64,21 @@ impl OutboundMessaging for TestOutboundMessaging { }) } - async fn multicast<'a, T>(&mut self, shard_group: ShardGroup, message: T) -> Result<(), OutboundMessagingError> - where - Self::Addr: 'a, - T: Into + Send, - { + async fn multicast(&mut self, shard_group: ShardGroup, message: T) -> Result<(), OutboundMessagingError> + where T: Into + Send { + // TODO: technically we should use the consensus epoch here, but current tests will not cause this issue let epoch = self .epoch_manager .current_epoch() .await .map_err(|e| OutboundMessagingError::UpstreamError(e.into()))?; - let peers: Vec = self + let peers = self .epoch_manager - 
.get_committees_by_shard_group(epoch, shard_group) + .get_committee_by_shard_group(epoch, shard_group) .await .map_err(|e| OutboundMessagingError::UpstreamError(e.into()))? - .values() - .flat_map(|c| c.addresses().cloned()) + .into_iter() + .map(|(addr, _)| addr) .collect(); self.tx_broadcast.send((peers, message.into())).await.map_err(|_| { @@ -113,7 +109,6 @@ impl TestInboundMessaging { } } -#[async_trait] impl InboundMessaging for TestInboundMessaging { type Addr = TestAddress; diff --git a/dan_layer/consensus_tests/src/support/validator/instance.rs b/dan_layer/consensus_tests/src/support/validator/instance.rs index 9408d1246..16da4df7f 100644 --- a/dan_layer/consensus_tests/src/support/validator/instance.rs +++ b/dan_layer/consensus_tests/src/support/validator/instance.rs @@ -61,6 +61,10 @@ impl Validator { &self.state_store } + pub fn epoch_manager(&self) -> &TestEpochManager { + &self.epoch_manager + } + pub fn get_transaction_pool_count(&self) -> usize { self.state_store .with_read_tx(|tx| tx.transaction_pool_count(None, None, None)) diff --git a/dan_layer/engine_types/src/lib.rs b/dan_layer/engine_types/src/lib.rs index 976029f20..d3afecb6c 100644 --- a/dan_layer/engine_types/src/lib.rs +++ b/dan_layer/engine_types/src/lib.rs @@ -35,3 +35,7 @@ pub mod id_provider; mod argument_parser; pub use argument_parser::parse_arg; + +pub mod template_models { + pub use tari_template_lib::models::*; +} diff --git a/dan_layer/engine_types/src/substate.rs b/dan_layer/engine_types/src/substate.rs index 28a35006e..49d61822e 100644 --- a/dan_layer/engine_types/src/substate.rs +++ b/dan_layer/engine_types/src/substate.rs @@ -637,6 +637,12 @@ impl From for SubstateValue { } } +impl From for SubstateValue { + fn from(output: UnclaimedConfidentialOutput) -> Self { + Self::UnclaimedConfidentialOutput(output) + } +} + impl Display for SubstateValue { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { // TODO: improve output diff --git a/dan_layer/epoch_manager/Cargo.toml b/dan_layer/epoch_manager/Cargo.toml index cca47a7d5..6a4511e05 100644 --- a/dan_layer/epoch_manager/Cargo.toml +++ b/dan_layer/epoch_manager/Cargo.toml @@ -8,24 +8,27 @@ repository.workspace = true license.workspace = true [dependencies] -tari_core = { workspace = true } +tari_core = { workspace = true, optional = true } tari_common_types = { workspace = true } tari_shutdown = { workspace = true } tari_dan_common_types = { workspace = true } tari_dan_storage = { workspace = true } tari_dan_storage_sqlite = { workspace = true, optional = true } tari_base_node_client = { workspace = true, optional = true } -tari_utilities = {workspace = true} +tari_utilities = { workspace = true } +tari_sidechain = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } -log = { workspace = true , optional = true } +log = { workspace = true, optional = true } thiserror = { workspace = true } tokio = { workspace = true, default-features = false, features = ["sync"] } +serde = { workspace = true } [features] base_layer = [ "log", + "tari_core", "tari_base_node_client", "tari_dan_storage_sqlite", ] diff --git a/dan_layer/epoch_manager/src/base_layer/base_layer_epoch_manager.rs b/dan_layer/epoch_manager/src/base_layer/base_layer_epoch_manager.rs index 43893ce99..50bf46640 100644 --- a/dan_layer/epoch_manager/src/base_layer/base_layer_epoch_manager.rs +++ b/dan_layer/epoch_manager/src/base_layer/base_layer_epoch_manager.rs @@ -28,6 +28,7 @@ use tari_common_types::types::{FixedHash, PublicKey}; use 
tari_core::{blocks::BlockHeader, transactions::transaction_components::ValidatorNodeRegistration}; use tari_dan_common_types::{ committee::{Committee, CommitteeInfo}, + layer_one_transaction::{LayerOnePayloadType, LayerOneTransactionDef}, optional::Optional, DerivableFromPublicKey, Epoch, @@ -37,14 +38,20 @@ use tari_dan_common_types::{ }; use tari_dan_storage::global::{models::ValidatorNode, DbBaseLayerBlockInfo, DbEpoch, GlobalDb, MetadataKey}; use tari_dan_storage_sqlite::global::SqliteGlobalDbAdapter; -use tari_utilities::{byte_array::ByteArray, hex::Hex}; +use tari_sidechain::EvictionProof; +use tari_utilities::byte_array::ByteArray; use tokio::sync::{broadcast, oneshot}; -use crate::{base_layer::config::EpochManagerConfig, error::EpochManagerError, EpochManagerEvent}; +use crate::{ + base_layer::config::EpochManagerConfig, + error::EpochManagerError, + traits::LayerOneTransactionSubmitter, + EpochManagerEvent, +}; const LOG_TARGET: &str = "tari::dan::epoch_manager::base_layer"; -pub struct BaseLayerEpochManager { +pub struct BaseLayerEpochManager { global_db: GlobalDb, base_node_client: TBaseNodeClient, config: EpochManagerConfig, @@ -56,16 +63,21 @@ pub struct BaseLayerEpochManager { node_public_key: PublicKey, current_shard_key: Option, base_layer_consensus_constants: Option, + layer_one_submitter: TLayerOneSubmitter, is_initial_base_layer_sync_complete: bool, } -impl - BaseLayerEpochManager, GrpcBaseNodeClient> +impl + BaseLayerEpochManager, GrpcBaseNodeClient, TLayerOneSubmitter> +where + TAddr: NodeAddressable + DerivableFromPublicKey, + TLayerOneSubmitter: LayerOneTransactionSubmitter, { pub fn new( config: EpochManagerConfig, global_db: GlobalDb>, base_node_client: GrpcBaseNodeClient, + layer_one_submitter: TLayerOneSubmitter, tx_events: broadcast::Sender, node_public_key: PublicKey, ) -> Self { @@ -81,6 +93,7 @@ impl node_public_key, current_shard_key: None, base_layer_consensus_constants: None, + layer_one_submitter, is_initial_base_layer_sync_complete: false, } } @@ -136,14 +149,13 @@ impl let mut tx = self.global_db.create_transaction()?; let mut validator_nodes = self.global_db.validator_nodes(&mut tx); - let vns = validator_nodes.get_all_within_epoch(epoch, self.config.validator_node_sidechain_id.as_ref())?; + let vns = validator_nodes.get_all_registered_within_start_epoch(epoch)?; let num_committees = calculate_num_committees(vns.len() as u64, self.config.committee_size); for vn in vns { validator_nodes.set_committee_shard( vn.shard_key, vn.shard_key.to_shard_group(self.config.num_preshards, num_committees), - self.config.validator_node_sidechain_id.as_ref(), epoch, )?; } @@ -188,41 +200,27 @@ impl pub async fn add_validator_node_registration( &mut self, - block_height: u64, + activation_epoch: Epoch, registration: ValidatorNodeRegistration, ) -> Result<(), EpochManagerError> { - if registration.sidechain_id() != self.config.validator_node_sidechain_id.as_ref() { - return Err(EpochManagerError::ValidatorNodeRegistrationSidechainIdMismatch { - expected: self.config.validator_node_sidechain_id.as_ref().map(|v| v.to_hex()), - actual: registration.sidechain_id().map(|v| v.to_hex()), - }); - } - - let constants = self.base_layer_consensus_constants().await?; - let next_epoch = constants.height_to_epoch(block_height) + Epoch(1); - - let next_epoch_height = constants.epoch_to_height(next_epoch); - let shard_key = self .base_node_client - .get_shard_key(next_epoch_height, registration.public_key()) + .get_shard_key(activation_epoch, registration.public_key()) .await? 
.ok_or_else(|| EpochManagerError::ShardKeyNotFound { public_key: registration.public_key().clone(), - block_height, + epoch: activation_epoch, })?; - info!(target: LOG_TARGET, "Registering validator node for epoch {}", next_epoch); + info!(target: LOG_TARGET, "Registering validator node for epoch {}", activation_epoch); let mut tx = self.global_db.create_transaction()?; self.global_db.validator_nodes(&mut tx).insert_validator_node( TAddr::derive_from_public_key(registration.public_key()), registration.public_key().clone(), shard_key, - block_height, - next_epoch, + activation_epoch, registration.claim_public_key().clone(), - registration.sidechain_id().cloned(), )?; if *registration.public_key() == self.node_public_key { @@ -231,13 +229,13 @@ impl let last_registration_epoch = metadata .get_metadata::(MetadataKey::EpochManagerLastEpochRegistration)? .unwrap_or(Epoch(0)); - if last_registration_epoch < next_epoch { - metadata.set_metadata(MetadataKey::EpochManagerLastEpochRegistration, &next_epoch)?; + if last_registration_epoch < activation_epoch { + metadata.set_metadata(MetadataKey::EpochManagerLastEpochRegistration, &activation_epoch)?; } self.current_shard_key = Some(shard_key); info!( target: LOG_TARGET, - "📋️ This validator node is registered for epoch {}, shard key: {} ", next_epoch, shard_key + "📋️ This validator node is registered for epoch {}, shard key: {} ", activation_epoch, shard_key ); } @@ -246,23 +244,17 @@ impl Ok(()) } - pub async fn remove_validator_node_registration( + pub async fn deactivate_validator_node( &mut self, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), EpochManagerError> { - if sidechain_id != self.config.validator_node_sidechain_id { - return Err(EpochManagerError::ValidatorNodeRegistrationSidechainIdMismatch { - expected: self.config.validator_node_sidechain_id.as_ref().map(|v| v.to_hex()), - actual: sidechain_id.map(|v| v.to_hex()), - }); - } - info!(target: LOG_TARGET, "Remove validator node({}) registration", public_key); + info!(target: LOG_TARGET, "Deactivating validator node({}) registration", public_key); let mut tx = self.global_db.create_transaction()?; self.global_db .validator_nodes(&mut tx) - .remove(public_key, sidechain_id)?; + .deactivate(public_key, deactivation_epoch)?; tx.commit()?; Ok(()) @@ -307,7 +299,7 @@ impl ) -> Result<(), EpochManagerError> { let mut tx = self.global_db.create_transaction()?; self.global_db - .base_layer_hashes(&mut tx) + .base_layer(&mut tx) .insert_base_layer_block_info(DbBaseLayerBlockInfo { hash: block_hash, height: block_height, @@ -368,7 +360,7 @@ impl let vn = self .global_db .validator_nodes(&mut tx) - .get_by_public_key(epoch, public_key, self.config.validator_node_sidechain_id.as_ref()) + .get_by_public_key(epoch, public_key) .optional()?; Ok(vn) @@ -387,7 +379,7 @@ impl let vn = self .global_db .validator_nodes(&mut tx) - .get_by_address(epoch, address, self.config.validator_node_sidechain_id.as_ref()) + .get_by_address(epoch, address) .optional()?; Ok(vn) @@ -405,7 +397,7 @@ impl let vn = self .global_db .validator_nodes(&mut tx) - .get_by_public_key(epoch, &public_key, self.config.validator_node_sidechain_id.as_ref()) + .get_by_public_key(epoch, &public_key) .optional()? 
.ok_or_else(|| EpochManagerError::ValidatorNodeNotRegistered { address: public_key.to_string(), @@ -443,7 +435,7 @@ impl pub fn get_committees(&self, epoch: Epoch) -> Result>, EpochManagerError> { let mut tx = self.global_db.create_transaction()?; let mut validator_node_db = self.global_db.validator_nodes(&mut tx); - Ok(validator_node_db.get_committees(epoch, self.config.validator_node_sidechain_id.as_ref())?) + Ok(validator_node_db.get_committees(epoch)?) } pub fn get_committee_info_by_validator_address( @@ -483,21 +475,19 @@ impl let shard_group = substate_address.to_shard_group(self.config.num_preshards, num_committees); // TODO(perf): fetch full validator node records for the shard group in single query (current O(n + 1) queries) - let committees = self.get_committees_for_shard_group(epoch, shard_group)?; + let committees = self.get_committee_for_shard_group(epoch, shard_group)?; let mut res = vec![]; - for (_, committee) in committees { - for pub_key in committee.public_keys() { - let vn = self.get_validator_node_by_public_key(epoch, pub_key)?.ok_or_else(|| { - EpochManagerError::ValidatorNodeNotRegistered { - address: TAddr::try_from_public_key(pub_key) - .map(|a| a.to_string()) - .unwrap_or_else(|| pub_key.to_string()), - epoch, - } - })?; - res.push(vn); - } + for (_, pub_key) in committees { + let vn = self.get_validator_node_by_public_key(epoch, &pub_key)?.ok_or_else(|| { + EpochManagerError::ValidatorNodeNotRegistered { + address: TAddr::try_from_public_key(&pub_key) + .map(|a| a.to_string()) + .unwrap_or_else(|| pub_key.to_string()), + epoch, + } + })?; + res.push(vn); } Ok(res) } @@ -520,10 +510,7 @@ impl pub fn get_validator_nodes_per_epoch(&self, epoch: Epoch) -> Result>, EpochManagerError> { let mut tx = self.global_db.create_transaction()?; - let db_vns = self - .global_db - .validator_nodes(&mut tx) - .get_all_within_epoch(epoch, self.config.validator_node_sidechain_id.as_ref())?; + let db_vns = self.global_db.validator_nodes(&mut tx).get_all_within_epoch(epoch)?; let vns = db_vns.into_iter().map(Into::into).collect(); Ok(vns) } @@ -578,11 +565,8 @@ impl pub fn get_total_validator_count(&self, epoch: Epoch) -> Result { let mut tx = self.global_db.create_transaction()?; - let db_vns = self - .global_db - .validator_nodes(&mut tx) - .count(epoch, self.config.validator_node_sidechain_id.as_ref())?; - Ok(db_vns) + let count = self.global_db.validator_nodes(&mut tx).count(epoch)?; + Ok(count) } pub fn get_num_committees(&self, epoch: Epoch) -> Result { @@ -601,11 +585,7 @@ impl let shard_group = substate_address.to_shard_group(self.config.num_preshards, num_committees); let mut tx = self.global_db.create_transaction()?; let mut validator_node_db = self.global_db.validator_nodes(&mut tx); - let num_validators = validator_node_db.count_in_shard_group( - epoch, - self.config.validator_node_sidechain_id.as_ref(), - shard_group, - )?; + let num_validators = validator_node_db.count_in_shard_group(epoch, shard_group)?; let num_validators = u32::try_from(num_validators).map_err(|_| EpochManagerError::IntegerOverflow { func: "get_committee_shard", })?; @@ -627,14 +607,25 @@ impl self.get_committee_info_for_substate(epoch, vn.shard_key) } - pub(crate) fn get_committees_for_shard_group( + pub(crate) fn get_committee_for_shard_group( + &self, + epoch: Epoch, + shard_group: ShardGroup, + ) -> Result, EpochManagerError> { + let mut tx = self.global_db.create_transaction()?; + let mut validator_node_db = self.global_db.validator_nodes(&mut tx); + let committees = 
validator_node_db.get_committee_for_shard_group(epoch, shard_group)?; + Ok(committees) + } + + pub(crate) fn get_committees_overlapping_shard_group( &self, epoch: Epoch, shard_group: ShardGroup, ) -> Result>, EpochManagerError> { let mut tx = self.global_db.create_transaction()?; let mut validator_node_db = self.global_db.validator_nodes(&mut tx); - let committees = validator_node_db.get_committees_for_shard_group(epoch, shard_group)?; + let committees = validator_node_db.get_committees_overlapping_shard_group(epoch, shard_group)?; Ok(committees) } @@ -657,14 +648,36 @@ impl let _ignore = self.tx_events.send(event); } - pub async fn get_base_layer_block_height(&self, hash: FixedHash) -> Result, EpochManagerError> { + pub fn get_base_layer_block_height(&self, hash: FixedHash) -> Result, EpochManagerError> { let mut tx = self.global_db.create_transaction()?; - let mut base_layer_hashes = self.global_db.base_layer_hashes(&mut tx); + let mut base_layer_hashes = self.global_db.base_layer(&mut tx); let info = base_layer_hashes .get_base_layer_block_height(hash)? .map(|info| info.height); Ok(info) } + + pub async fn add_intent_to_evict_validator(&self, proof: EvictionProof) -> Result<(), EpochManagerError> { + { + let mut tx = self.global_db.create_transaction()?; + // Currently we store this for ease of debugging, there is no specific need to store this in the database + let mut bl = self.global_db.base_layer(&mut tx); + bl.insert_eviction_proof(&proof)?; + tx.commit()?; + } + + let proof = LayerOneTransactionDef { + proof_type: LayerOnePayloadType::EvictionProof, + payload: proof, + }; + + self.layer_one_submitter + .submit_transaction(proof) + .await + .map_err(|e| EpochManagerError::FailedToSubmitLayerOneTransaction { details: e.to_string() })?; + + Ok(()) + } } fn calculate_num_committees(num_vns: u64, committee_size: NonZeroU32) -> u32 { diff --git a/dan_layer/epoch_manager/src/base_layer/epoch_manager_service.rs b/dan_layer/epoch_manager/src/base_layer/epoch_manager_service.rs index e9d7249c9..59db56301 100644 --- a/dan_layer/epoch_manager/src/base_layer/epoch_manager_service.rs +++ b/dan_layer/epoch_manager/src/base_layer/epoch_manager_service.rs @@ -39,18 +39,22 @@ use crate::{ types::EpochManagerRequest, }, error::EpochManagerError, + traits::LayerOneTransactionSubmitter, EpochManagerEvent, }; const LOG_TARGET: &str = "tari::validator_node::epoch_manager"; -pub struct EpochManagerService { +pub struct EpochManagerService { rx_request: Receiver>, - inner: BaseLayerEpochManager, + inner: BaseLayerEpochManager, } -impl - EpochManagerService, GrpcBaseNodeClient> +impl + EpochManagerService, GrpcBaseNodeClient, TLayerOneSubmitter> +where + TAddr: NodeAddressable + DerivableFromPublicKey + 'static, + TLayerOneSubmitter: LayerOneTransactionSubmitter + Send + Sync + 'static, { pub fn spawn( config: EpochManagerConfig, @@ -59,12 +63,20 @@ impl shutdown: ShutdownSignal, global_db: GlobalDb>, base_node_client: GrpcBaseNodeClient, + layer_one_transaction_submitter: TLayerOneSubmitter, node_public_key: PublicKey, ) -> JoinHandle> { tokio::spawn(async move { EpochManagerService { rx_request, - inner: BaseLayerEpochManager::new(config, global_db, base_node_client, events, node_public_key), + inner: BaseLayerEpochManager::new( + config, + global_db, + base_node_client, + layer_one_transaction_submitter, + events, + node_public_key, + ), } .run(shutdown) .await?; @@ -192,25 +204,25 @@ impl handle(reply, self.inner.get_validator_nodes_per_epoch(epoch), context) }, 
EpochManagerRequest::AddValidatorNodeRegistration { - block_height, + activation_epoch, registration, value: _value, reply, } => handle( reply, self.inner - .add_validator_node_registration(block_height, registration) + .add_validator_node_registration(activation_epoch, registration) .await, context, ), - EpochManagerRequest::RemoveValidatorNodeRegistration { + EpochManagerRequest::DeactivateValidatorNode { public_key, - sidechain_id, + deactivation_epoch, reply, } => handle( reply, self.inner - .remove_validator_node_registration(public_key, sidechain_id) + .deactivate_validator_node(public_key, deactivation_epoch) .await, context, ), @@ -244,13 +256,22 @@ impl EpochManagerRequest::GetNumCommittees { epoch, reply } => { handle(reply, self.inner.get_num_committees(epoch), context) }, - EpochManagerRequest::GetCommitteesForShardGroup { + EpochManagerRequest::GetCommitteeForShardGroup { epoch, shard_group, reply, } => handle( reply, - self.inner.get_committees_for_shard_group(epoch, shard_group), + self.inner.get_committee_for_shard_group(epoch, shard_group), + context, + ), + EpochManagerRequest::GetCommitteesOverlappingShardGroup { + epoch, + shard_group, + reply, + } => handle( + reply, + self.inner.get_committees_overlapping_shard_group(epoch, shard_group), context, ), EpochManagerRequest::GetFeeClaimPublicKey { reply } => { @@ -260,7 +281,10 @@ impl handle(reply, self.inner.set_fee_claim_public_key(public_key), context) }, EpochManagerRequest::GetBaseLayerBlockHeight { hash, reply } => { - handle(reply, self.inner.get_base_layer_block_height(hash).await, context) + handle(reply, self.inner.get_base_layer_block_height(hash), context) + }, + EpochManagerRequest::AddIntentToEvictValidator { proof, reply } => { + handle(reply, self.inner.add_intent_to_evict_validator(*proof).await, context) }, } } diff --git a/dan_layer/epoch_manager/src/base_layer/handle.rs b/dan_layer/epoch_manager/src/base_layer/handle.rs index 4f78e449a..3755e590d 100644 --- a/dan_layer/epoch_manager/src/base_layer/handle.rs +++ b/dan_layer/epoch_manager/src/base_layer/handle.rs @@ -15,6 +15,7 @@ use tari_dan_common_types::{ SubstateAddress, }; use tari_dan_storage::global::models::ValidatorNode; +use tari_sidechain::EvictionProof; use tokio::sync::{broadcast, mpsc, oneshot}; use crate::{ @@ -94,14 +95,14 @@ impl EpochManagerHandle { pub async fn add_validator_node_registration( &self, - block_height: u64, + activation_epoch: Epoch, registration: ValidatorNodeRegistration, value_of_registration: MicroMinotari, ) -> Result<(), EpochManagerError> { let (tx, rx) = oneshot::channel(); self.tx_request .send(EpochManagerRequest::AddValidatorNodeRegistration { - block_height, + activation_epoch, registration, value: value_of_registration, reply: tx, @@ -111,16 +112,16 @@ impl EpochManagerHandle { rx.await.map_err(|_| EpochManagerError::ReceiveError)? } - pub async fn remove_validator_node_registration( + pub async fn deactivate_validator_node( &self, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), EpochManagerError> { let (tx, rx) = oneshot::channel(); self.tx_request - .send(EpochManagerRequest::RemoveValidatorNodeRegistration { + .send(EpochManagerRequest::DeactivateValidatorNode { public_key, - sidechain_id, + deactivation_epoch, reply: tx, }) .await @@ -409,14 +410,32 @@ impl EpochManagerReader for EpochManagerHandle { rx.await.map_err(|_| EpochManagerError::ReceiveError)? 
} - async fn get_committees_by_shard_group( + async fn get_committee_by_shard_group( + &self, + epoch: Epoch, + shard_group: ShardGroup, + ) -> Result, EpochManagerError> { + let (tx, rx) = oneshot::channel(); + self.tx_request + .send(EpochManagerRequest::GetCommitteeForShardGroup { + epoch, + shard_group, + reply: tx, + }) + .await + .map_err(|_| EpochManagerError::SendError)?; + + rx.await.map_err(|_| EpochManagerError::ReceiveError)? + } + + async fn get_committees_overlapping_shard_group( &self, epoch: Epoch, shard_group: ShardGroup, ) -> Result>, EpochManagerError> { let (tx, rx) = oneshot::channel(); self.tx_request - .send(EpochManagerRequest::GetCommitteesForShardGroup { + .send(EpochManagerRequest::GetCommitteesOverlappingShardGroup { epoch, shard_group, reply: tx, @@ -435,4 +454,16 @@ impl EpochManagerReader for EpochManagerHandle { .map_err(|_| EpochManagerError::SendError)?; rx.await.map_err(|_| EpochManagerError::ReceiveError)? } + + async fn add_intent_to_evict_validator(&self, proof: EvictionProof) -> Result<(), EpochManagerError> { + let (tx, rx) = oneshot::channel(); + self.tx_request + .send(EpochManagerRequest::AddIntentToEvictValidator { + proof: Box::new(proof), + reply: tx, + }) + .await + .map_err(|_| EpochManagerError::SendError)?; + rx.await.map_err(|_| EpochManagerError::ReceiveError)? + } } diff --git a/dan_layer/epoch_manager/src/base_layer/initializer.rs b/dan_layer/epoch_manager/src/base_layer/initializer.rs index 7df98a863..d8a40f375 100644 --- a/dan_layer/epoch_manager/src/base_layer/initializer.rs +++ b/dan_layer/epoch_manager/src/base_layer/initializer.rs @@ -31,15 +31,23 @@ use tokio::{ task::JoinHandle, }; -use crate::base_layer::{config::EpochManagerConfig, epoch_manager_service::EpochManagerService, EpochManagerHandle}; +use crate::{ + base_layer::{config::EpochManagerConfig, epoch_manager_service::EpochManagerService, EpochManagerHandle}, + traits::LayerOneTransactionSubmitter, +}; -pub fn spawn_service( +pub fn spawn_service( config: EpochManagerConfig, global_db: GlobalDb>, base_node_client: GrpcBaseNodeClient, node_public_key: PublicKey, + layer_one_submitter: TLayerOneSubmitter, shutdown: ShutdownSignal, -) -> (EpochManagerHandle, JoinHandle>) { +) -> (EpochManagerHandle, JoinHandle>) +where + TAddr: NodeAddressable + DerivableFromPublicKey + 'static, + TLayerOneSubmitter: LayerOneTransactionSubmitter + Send + Sync + 'static, +{ let (tx_request, rx_request) = mpsc::channel(10); let (events, _) = broadcast::channel(100); let epoch_manager = EpochManagerHandle::new(tx_request, events.clone()); @@ -50,6 +58,7 @@ pub fn spawn_service( shutdown, global_db, base_node_client, + layer_one_submitter, node_public_key, ); (epoch_manager, handle) diff --git a/dan_layer/epoch_manager/src/base_layer/types.rs b/dan_layer/epoch_manager/src/base_layer/types.rs index 3f96e706c..a959e3b06 100644 --- a/dan_layer/epoch_manager/src/base_layer/types.rs +++ b/dan_layer/epoch_manager/src/base_layer/types.rs @@ -13,6 +13,7 @@ use tari_dan_common_types::{ SubstateAddress, }; use tari_dan_storage::global::models::ValidatorNode; +use tari_sidechain::EvictionProof; use tokio::sync::oneshot; use crate::error::EpochManagerError; @@ -49,14 +50,14 @@ pub enum EpochManagerRequest { reply: Reply>>, }, AddValidatorNodeRegistration { - block_height: u64, + activation_epoch: Epoch, registration: ValidatorNodeRegistration, value: MicroMinotari, reply: Reply<()>, }, - RemoveValidatorNodeRegistration { + DeactivateValidatorNode { public_key: PublicKey, - sidechain_id: Option, + 
deactivation_epoch: Epoch, reply: Reply<()>, }, AddBlockHash { @@ -124,7 +125,12 @@ pub enum EpochManagerRequest { epoch: Epoch, reply: Reply, }, - GetCommitteesForShardGroup { + GetCommitteeForShardGroup { + epoch: Epoch, + shard_group: ShardGroup, + reply: Reply>, + }, + GetCommitteesOverlappingShardGroup { epoch: Epoch, shard_group: ShardGroup, reply: Reply>>, @@ -140,4 +146,8 @@ pub enum EpochManagerRequest { public_key: PublicKey, reply: Reply<()>, }, + AddIntentToEvictValidator { + proof: Box, + reply: Reply<()>, + }, } diff --git a/dan_layer/epoch_manager/src/error.rs b/dan_layer/epoch_manager/src/error.rs index c07959d80..e613e39a8 100644 --- a/dan_layer/epoch_manager/src/error.rs +++ b/dan_layer/epoch_manager/src/error.rs @@ -33,8 +33,8 @@ pub enum EpochManagerError { ValidatorNodeNotRegistered { address: String, epoch: Epoch }, #[error("Base layer consensus constants not set")] BaseLayerConsensusConstantsNotSet, - #[error("Base layer could not return shard key for {public_key} at height {block_height}")] - ShardKeyNotFound { public_key: PublicKey, block_height: u64 }, + #[error("Base layer could not return shard key for {public_key} at {epoch}")] + ShardKeyNotFound { public_key: PublicKey, epoch: Epoch }, #[error("Integer overflow: {func}")] IntegerOverflow { func: &'static str }, #[error("Invalid epoch: {epoch}")] @@ -44,6 +44,8 @@ pub enum EpochManagerError { actual: Option, expected: Option, }, + #[error("Failed to submit layer one transaction: {details}")] + FailedToSubmitLayerOneTransaction { details: String }, } impl EpochManagerError { diff --git a/dan_layer/epoch_manager/src/lib.rs b/dan_layer/epoch_manager/src/lib.rs index 132c7d081..9f1b74416 100644 --- a/dan_layer/epoch_manager/src/lib.rs +++ b/dan_layer/epoch_manager/src/lib.rs @@ -30,4 +30,5 @@ mod error; pub use error::EpochManagerError; mod event; + pub use event::*; diff --git a/dan_layer/epoch_manager/src/traits.rs b/dan_layer/epoch_manager/src/traits.rs index 0c256a6f9..804dd12d8 100644 --- a/dan_layer/epoch_manager/src/traits.rs +++ b/dan_layer/epoch_manager/src/traits.rs @@ -20,18 +20,20 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::collections::HashMap; +use std::{collections::HashMap, future::Future}; use async_trait::async_trait; use tari_common_types::types::{FixedHash, PublicKey}; use tari_dan_common_types::{ committee::{Committee, CommitteeInfo}, + layer_one_transaction::LayerOneTransactionDef, Epoch, NodeAddressable, ShardGroup, SubstateAddress, }; use tari_dan_storage::global::models::ValidatorNode; +use tari_sidechain::EvictionProof; use tokio::sync::broadcast; use crate::{EpochManagerError, EpochManagerEvent}; @@ -113,10 +115,15 @@ pub trait EpochManagerReader: Send + Sync { async fn get_num_committees(&self, epoch: Epoch) -> Result; - async fn get_committees_by_shard_group( + async fn get_committee_by_shard_group( &self, epoch: Epoch, shards: ShardGroup, + ) -> Result, EpochManagerError>; + async fn get_committees_overlapping_shard_group( + &self, + epoch: Epoch, + shard_group: ShardGroup, ) -> Result>, EpochManagerError>; async fn get_local_committee(&self, epoch: Epoch) -> Result, EpochManagerError> { @@ -175,4 +182,14 @@ pub trait EpochManagerReader: Send + Sync { } async fn get_base_layer_block_height(&self, hash: FixedHash) -> Result, EpochManagerError>; + + async fn add_intent_to_evict_validator(&self, proof: EvictionProof) -> Result<(), EpochManagerError>; +} + +pub trait LayerOneTransactionSubmitter { + type Error: std::error::Error; + fn submit_transaction( + &self, + proof: LayerOneTransactionDef, + ) -> impl Future> + Send; } diff --git a/dan_layer/p2p/proto/consensus.proto b/dan_layer/p2p/proto/consensus.proto index 89ca702c8..a56b95246 100644 --- a/dan_layer/p2p/proto/consensus.proto +++ b/dan_layer/p2p/proto/consensus.proto @@ -127,27 +127,18 @@ message Command { ForeignProposalAtom foreign_proposal = 9; MintConfidentialOutputAtom mint_confidential_output = 10; - SuspendNodeAtom suspend_node = 11; - ResumeNodeAtom resume_node = 12; - bool end_epoch = 13; + EvictNodeAtom evict_node = 11; + bool end_epoch = 12; } } -message SuspendNodeAtom { - bytes public_key = 1; -} - -message ResumeNodeAtom { - bytes public_key = 1; -} - message ForeignProposalAtom { bytes block_id = 1; uint32 shard_group = 2; } message MintConfidentialOutputAtom { - bytes substate_id = 1; + bytes commitment = 1; } message TransactionAtom { @@ -192,13 +183,14 @@ message Evidence { message QuorumCertificate { - bytes block_id = 1; - uint64 block_height = 2; - uint64 epoch = 3; - repeated tari.dan.common.SignatureAndPublicKey signatures = 4; - repeated bytes leaf_hashes = 6; - QuorumDecision decision = 7; - uint32 shard_group = 8; + bytes header_hash = 1; + bytes parent_id = 2; + uint64 block_height = 3; + uint64 epoch = 4; + repeated tari.dan.common.SignatureAndPublicKey signatures = 5; + repeated bytes leaf_hashes = 7; + QuorumDecision decision = 8; + uint32 shard_group = 9; } message ValidatorMetadata { @@ -295,3 +287,7 @@ message FullBlock { repeated QuorumCertificate qcs = 2; repeated tari.dan.transaction.Transaction transactions = 3; } + +message EvictNodeAtom { + bytes public_key = 1; +} \ No newline at end of file diff --git a/dan_layer/p2p/src/conversions/consensus.rs b/dan_layer/p2p/src/conversions/consensus.rs index 1f5adbe3b..c7e619161 100644 --- a/dan_layer/p2p/src/conversions/consensus.rs +++ b/dan_layer/p2p/src/conversions/consensus.rs @@ -25,7 +25,7 @@ use std::{ convert::{TryFrom, TryInto}, }; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; use tari_bor::{decode_exact, encode}; use tari_common_types::types::PublicKey; use tari_consensus::messages::{ @@ -58,6 +58,7 @@ use tari_dan_storage::{ 
BlockId, Command, Decision, + EvictNodeAtom, Evidence, ForeignProposal, ForeignProposalAtom, @@ -67,12 +68,10 @@ use tari_dan_storage::{ QcId, QuorumCertificate, QuorumDecision, - ResumeNodeAtom, SubstateDestroyed, SubstatePledge, SubstatePledges, SubstateRecord, - SuspendNodeAtom, TransactionAtom, }, }; @@ -606,8 +605,7 @@ impl From<&Command> for proto::consensus::Command { Command::MintConfidentialOutput(atom) => { proto::consensus::command::Command::MintConfidentialOutput(atom.into()) }, - Command::SuspendNode(atom) => proto::consensus::command::Command::SuspendNode(atom.into()), - Command::ResumeNode(atom) => proto::consensus::command::Command::ResumeNode(atom.into()), + Command::EvictNode(atom) => proto::consensus::command::Command::EvictNode(atom.into()), Command::EndEpoch => proto::consensus::command::Command::EndEpoch(true), }; @@ -635,8 +633,7 @@ impl TryFrom for Command { proto::consensus::command::Command::MintConfidentialOutput(atom) => { Command::MintConfidentialOutput(atom.try_into()?) }, - proto::consensus::command::Command::SuspendNode(atom) => Command::SuspendNode(atom.try_into()?), - proto::consensus::command::Command::ResumeNode(atom) => Command::ResumeNode(atom.try_into()?), + proto::consensus::command::Command::EvictNode(atom) => Command::EvictNode(atom.try_into()?), proto::consensus::command::Command::EndEpoch(_) => Command::EndEpoch, }) } @@ -674,46 +671,6 @@ impl TryFrom for TransactionAtom { } } -// -------------------------------- SuspendNodeAtom -------------------------------- // - -impl From<&SuspendNodeAtom> for proto::consensus::SuspendNodeAtom { - fn from(value: &SuspendNodeAtom) -> Self { - Self { - public_key: value.public_key.as_bytes().to_vec(), - } - } -} - -impl TryFrom for SuspendNodeAtom { - type Error = anyhow::Error; - - fn try_from(value: proto::consensus::SuspendNodeAtom) -> Result { - Ok(Self { - public_key: PublicKey::from_canonical_bytes(&value.public_key) - .map_err(|e| anyhow!("SuspendNodeAtom failed to decode public key: {e}"))?, - }) - } -} -// -------------------------------- ResumeNodeAtom -------------------------------- // - -impl From<&ResumeNodeAtom> for proto::consensus::ResumeNodeAtom { - fn from(value: &ResumeNodeAtom) -> Self { - Self { - public_key: value.public_key.as_bytes().to_vec(), - } - } -} - -impl TryFrom for ResumeNodeAtom { - type Error = anyhow::Error; - - fn try_from(value: proto::consensus::ResumeNodeAtom) -> Result { - Ok(Self { - public_key: PublicKey::from_canonical_bytes(&value.public_key) - .map_err(|e| anyhow!("ResumeNodeAtom failed to decode public key: {e}"))?, - }) - } -} // -------------------------------- BlockFee -------------------------------- // impl From<&LeaderFee> for proto::consensus::LeaderFee { @@ -764,7 +721,7 @@ impl TryFrom for ForeignProposalAtom { impl From<&MintConfidentialOutputAtom> for proto::consensus::MintConfidentialOutputAtom { fn from(value: &MintConfidentialOutputAtom) -> Self { Self { - substate_id: value.substate_id.to_bytes(), + commitment: value.commitment.as_bytes().to_vec(), } } } @@ -773,8 +730,30 @@ impl TryFrom for MintConfidentialO type Error = anyhow::Error; fn try_from(value: proto::consensus::MintConfidentialOutputAtom) -> Result { + use tari_template_lib::models::UnclaimedConfidentialOutputAddress; Ok(Self { - substate_id: SubstateId::from_bytes(&value.substate_id)?, + commitment: UnclaimedConfidentialOutputAddress::from_bytes(&value.commitment)?, + }) + } +} + +// -------------------------------- EvictNodeAtom -------------------------------- // + +impl 
From<&EvictNodeAtom> for proto::consensus::EvictNodeAtom { + fn from(value: &EvictNodeAtom) -> Self { + Self { + public_key: value.public_key.as_bytes().to_vec(), + } + } +} + +impl TryFrom for EvictNodeAtom { + type Error = anyhow::Error; + + fn try_from(value: proto::consensus::EvictNodeAtom) -> Result { + Ok(Self { + public_key: PublicKey::from_canonical_bytes(&value.public_key) + .map_err(|e| anyhow!("EvictNodeAtom failed to decode public key: {e}"))?, }) } } @@ -878,7 +857,8 @@ impl TryFrom for Evidence { impl From<&QuorumCertificate> for proto::consensus::QuorumCertificate { fn from(source: &QuorumCertificate) -> Self { Self { - block_id: source.block_id().as_bytes().to_vec(), + header_hash: source.header_hash().as_bytes().to_vec(), + parent_id: source.parent_id().as_bytes().to_vec(), block_height: source.block_height().as_u64(), epoch: source.epoch().as_u64(), shard_group: source.shard_group().encode_as_u32(), @@ -896,7 +876,8 @@ impl TryFrom for QuorumCertificate { let shard_group = ShardGroup::decode_from_u32(value.shard_group) .ok_or_else(|| anyhow!("QC shard_group ({}) is not a valid", value.shard_group))?; Ok(Self::new( - value.block_id.try_into()?, + value.header_hash.try_into().context("header_hash")?, + value.parent_id.try_into().context("parent_id")?, NodeHeight(value.block_height), Epoch(value.epoch), shard_group, diff --git a/dan_layer/p2p/src/conversions/transaction.rs b/dan_layer/p2p/src/conversions/transaction.rs index 5b2b53126..a4f2c033c 100644 --- a/dan_layer/p2p/src/conversions/transaction.rs +++ b/dan_layer/p2p/src/conversions/transaction.rs @@ -316,7 +316,7 @@ impl From for proto::transaction::Instruction { }, Instruction::ClaimBurn { claim } => { result.instruction_type = InstructionType::ClaimBurn as i32; - result.claim_burn_commitment_address = claim.output_address.to_vec(); + result.claim_burn_commitment_address = claim.output_address.as_bytes().to_vec(); result.claim_burn_range_proof = claim.range_proof.to_vec(); result.claim_burn_proof_of_knowledge = Some(claim.proof_of_knowledge.into()); result.claim_burn_public_key = claim.public_key.to_vec(); diff --git a/dan_layer/rpc_state_sync/src/manager.rs b/dan_layer/rpc_state_sync/src/manager.rs index d5d0cd051..d81ac7009 100644 --- a/dan_layer/rpc_state_sync/src/manager.rs +++ b/dan_layer/rpc_state_sync/src/manager.rs @@ -316,14 +316,16 @@ where TConsensusSpec: ConsensusSpec let local_info = self.epoch_manager.get_local_committee_info(current_epoch).await?; let prev_epoch = current_epoch.saturating_sub(Epoch(1)); info!(target: LOG_TARGET,"Previous epoch is {}", prev_epoch); + // We want to get any committees from the previous epoch that overlap with our shard group in this epoch let committees = self .epoch_manager - .get_committees_by_shard_group(prev_epoch, local_info.shard_group()) + .get_committees_overlapping_shard_group(prev_epoch, local_info.shard_group()) .await?; // TODO: not strictly necessary to sort by shard but easier on the eyes in logs let mut committees = committees.into_iter().collect::>(); committees.sort_by_key(|(k, _)| *k); + info!(target: LOG_TARGET, "🛜 Querying {} shard group(s) from epoch {}", committees.len(), prev_epoch); Ok(committees) } @@ -361,16 +363,18 @@ where TConsensusSpec: ConsensusSpec + Send + Sync + 'static async fn check_sync(&self) -> Result { let current_epoch = self.epoch_manager.current_epoch().await?; - let leaf_epoch = self.state_store.with_read_tx(|tx| { - let epoch = LeafBlock::get(tx, current_epoch) - .optional()? 
- .map(|leaf| leaf.epoch()) - .unwrap_or(Epoch(0)); - Ok::<_, Self::Error>(epoch) - })?; + let leaf_block = self + .state_store + .with_read_tx(|tx| LeafBlock::get(tx, current_epoch).optional())?; // We only sync if we're behind by an epoch. The current epoch is replayed in consensus. - if current_epoch > leaf_epoch { + if current_epoch > leaf_block.map_or(Epoch::zero(), |b| b.epoch()) { + info!(target: LOG_TARGET, "🛜Our current leaf block is behind the current epoch. Syncing..."); + return Ok(SyncStatus::Behind); + } + + if leaf_block.is_some_and(|l| l.height.is_zero()) { + // We only have the genesis for the epoch, let's assume we're behind in this case info!(target: LOG_TARGET, "🛜Our current leaf block is behind the current epoch. Syncing..."); return Ok(SyncStatus::Behind); } @@ -414,8 +418,10 @@ where TConsensusSpec: ConsensusSpec + Send + Sync + 'static let checkpoint = match self.fetch_epoch_checkpoint(&mut client, current_epoch).await { Ok(Some(cp)) => cp, Ok(None) => { - // EDGE-CASE: This may occur because the previous epoch had not started at the consensus - // level. + // TODO: we should check with f + 1 validators in this case. If a single validator reports + // this falsely, this will prevent us from continuing with consensus for a long time (state + // root will mismatch). + // TODO: we should instead ask the base layer if this is the first epoch in the network warn!( target: LOG_TARGET, "❓No checkpoint for epoch {current_epoch}. This may mean that this is the first epoch in the network" @@ -486,6 +492,7 @@ where TConsensusSpec: ConsensusSpec + Send + Sync + 'static return Err(err); } + info!(target: LOG_TARGET, "🛜State sync complete"); Ok(()) } } diff --git a/dan_layer/state_store_sqlite/Cargo.toml b/dan_layer/state_store_sqlite/Cargo.toml index 5ac5991ed..83cc162aa 100644 --- a/dan_layer/state_store_sqlite/Cargo.toml +++ b/dan_layer/state_store_sqlite/Cargo.toml @@ -19,7 +19,7 @@ tari_utilities = { workspace = true } anyhow = { workspace = true } bigdecimal = { workspace = true } -diesel = { workspace = true, default-features = false, features = ["sqlite", "time", "numeric", "returning_clauses_for_sqlite_3_35"] } +diesel = { workspace = true, default-features = false, features = ["sqlite", "time", "numeric", "returning_clauses_for_sqlite_3_35", "32-column-tables"] } diesel_migrations = { workspace = true } indexmap = { workspace = true } log = { workspace = true } diff --git a/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql b/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql index 4ca94a0cc..142939624 100644 --- a/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql +++ b/dan_layer/state_store_sqlite/migrations/2023-06-08-091819_create_state_store/up.sql @@ -134,6 +134,8 @@ create table block_diffs change text NOT NULL, -- NULL for Down state text NULL, + -- state_hash is to aid in debugging + state_hash text NULL, created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, -- FOREIGN KEY (transaction_id) REFERENCES transactions (transaction_id), FOREIGN KEY (block_id) REFERENCES blocks (block_id) @@ -368,7 +370,7 @@ create index transaction_pool_state_updates_idx_is_applied on transaction_pool_s create table votes ( id integer not null primary key AUTOINCREMENT, - hash text not null, + siphash bigint not null, epoch bigint not null, block_id text not NULL, decision integer not null, @@ -428,13 +430,13 @@ CREATE TABLE foreign_receive_counters CREATE TABLE burnt_utxos ( id 
integer not null primary key AUTOINCREMENT, - substate_id text not NULL, - substate text not NULL, + commitment text not NULL, + output text not NULL, base_layer_block_height bigint not NULL, proposed_in_block text NULL REFERENCES blocks (block_id), proposed_in_block_height bigint NULL, created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - UNIQUE (substate_id) + UNIQUE (commitment) ); CREATE TABLE state_tree @@ -520,21 +522,19 @@ CREATE TABLE validator_epoch_stats CREATE UNIQUE INDEX participation_shares_uniq_idx_epoch_public_key on validator_epoch_stats (epoch, public_key); -CREATE TABLE suspended_nodes +CREATE TABLE evicted_nodes ( - id integer not NULL primary key AUTOINCREMENT, - epoch bigint not NULL, - public_key text not NULL, - suspended_in_block text not NULL, - suspended_in_block_height bigint not NULL, - resumed_in_block text NULL, - resumed_in_block_height bigint NULL, - created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP + id integer not NULL primary key AUTOINCREMENT, + epoch bigint not NULL, + public_key text not NULL, + evicted_in_block text NULL REFERENCES blocks (block_id), + evicted_in_block_height bigint NULL, + eviction_committed_in_epoch bigint NULL, + created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ); -CREATE UNIQUE INDEX suspended_nodes_uniq_idx_epoch_public_key on suspended_nodes (epoch, public_key); -CREATE INDEX suspended_nodes_idx_suspended_in_block on suspended_nodes (suspended_in_block); -CREATE INDEX suspended_nodes_idx_unsuspended_in_block on suspended_nodes (resumed_in_block); +CREATE UNIQUE INDEX evicted_nodes_uniq_idx_epoch_public_key on evicted_nodes (epoch, public_key); +CREATE INDEX evicted_nodes_idx_evicted_in_block on evicted_nodes (evicted_in_block); CREATE TABLE diagnostics_no_votes ( diff --git a/dan_layer/state_store_sqlite/src/reader.rs b/dan_layer/state_store_sqlite/src/reader.rs index 480905120..dd6e401ba 100644 --- a/dan_layer/state_store_sqlite/src/reader.rs +++ b/dan_layer/state_store_sqlite/src/reader.rs @@ -1,6 +1,5 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause - use std::{ borrow::Borrow, collections::{HashMap, HashSet}, @@ -84,7 +83,7 @@ use tari_dan_storage::{ StateStoreReadTransaction, StorageError, }; -use tari_engine_types::substate::SubstateId; +use tari_engine_types::{substate::SubstateId, template_models::UnclaimedConfidentialOutputAddress}; use tari_state_tree::{Node, NodeKey, TreeNode, Version}; use tari_transaction::TransactionId; use tari_utilities::{hex::Hex, ByteArray}; @@ -1444,6 +1443,15 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor substate_id: &SubstateId, ) -> Result { use crate::schema::block_diffs; + if !Block::record_exists(self, block_id)? 
{ + return Err(StorageError::QueryError { + reason: format!( + "block_diffs_get_last_change_for_substate: Block {} does not exist", + block_id + ), + }); + } + let commit_block = self.get_commit_block()?; let block_ids = self.get_block_ids_with_commands_between(commit_block.block_id(), block_id)?; @@ -2181,7 +2189,7 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor // Get the last committed block let commit_block = self.get_commit_block()?; - // Block may modify state with zero commands because the justify a block that changes state + // Block may modify state with zero commands because it justifies a block that changes state let block_ids = self.get_block_ids_between(commit_block.block_id(), block_id, 1000)?; if block_ids.is_empty() { @@ -2191,6 +2199,7 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor let diff_recs = pending_state_tree_diffs::table .filter(pending_state_tree_diffs::block_id.eq_any(block_ids)) .order_by(pending_state_tree_diffs::block_height.asc()) + .then_order_by(pending_state_tree_diffs::id.asc()) .get_results::(self.connection()) .map_err(|e| SqliteStorageError::DieselError { operation: "pending_state_tree_diffs_get_all_pending", @@ -2355,7 +2364,7 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor #[allow(clippy::mutable_key_type)] let mut pledges = SubstatePledges::with_capacity(recs.len()); for pledge in recs { - let substate_id = parse_from_string(&pledge.substate_id)?; + let substate_id = parse_from_string::(&pledge.substate_id)?; let version = pledge.version as u32; let id = VersionedSubstateId::new(substate_id, version); let lock_type = parse_from_string(&pledge.lock_type)?; @@ -2372,11 +2381,11 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor Ok(pledges) } - fn burnt_utxos_get(&self, substate_id: &SubstateId) -> Result { + fn burnt_utxos_get(&self, commitment: &UnclaimedConfidentialOutputAddress) -> Result { use crate::schema::burnt_utxos; let burnt_utxo = burnt_utxos::table - .filter(burnt_utxos::substate_id.eq(substate_id.to_string())) + .filter(burnt_utxos::commitment.eq(commitment.to_string())) .first::(self.connection()) .map_err(|e| SqliteStorageError::DieselError { operation: "burnt_utxos_get", @@ -2487,94 +2496,51 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor }) } - fn validator_epoch_stats_get_nodes_to_suspend( - &self, - block_id: &BlockId, - min_missed_proposals: u64, - limit: usize, - ) -> Result, StorageError> { - use crate::schema::{suspended_nodes, validator_epoch_stats}; - if limit == 0 { - return Ok(vec![]); - } - let commit_block = self.get_commit_block()?; - - let block_ids = self.get_block_ids_between(commit_block.block_id(), block_id, 1000)?; - - let pks = validator_epoch_stats::table - .select(validator_epoch_stats::public_key) - .filter( - validator_epoch_stats::public_key.ne_all( - suspended_nodes::table.select(suspended_nodes::public_key).filter( - // Not already suspended in uncommitted blocks - suspended_nodes::suspended_in_block - .eq_any(block_ids) - // Not suspended in committed blocks - .or(suspended_nodes::suspended_in_block_height.lt(commit_block.height().as_u64() as i64)), - ), - ), - ) - .filter(validator_epoch_stats::missed_proposals.ge(min_missed_proposals as i64)) - .filter(validator_epoch_stats::epoch.eq(commit_block.epoch().as_u64() as i64)) - .limit(limit as i64) - .get_results::(self.connection()) - .map_err(|e| SqliteStorageError::DieselError { - 
operation: "validator_epoch_stats_get_nodes_to_suspend", - source: e, - })?; - - pks.iter() - .map(|s| { - PublicKey::from_hex(s).map_err(|e| StorageError::DecodingError { - operation: "validator_epoch_stats_get_nodes_to_suspend", - item: "public key", - details: format!("Failed to decode public key: {e}"), - }) - }) - .collect() - } - - fn validator_epoch_stats_get_nodes_to_resume( + fn validator_epoch_stats_get_nodes_to_evict( &self, block_id: &BlockId, - limit: usize, + threshold: u64, + limit: u64, ) -> Result, StorageError> { - use crate::schema::{suspended_nodes, validator_epoch_stats}; + use crate::schema::{evicted_nodes, validator_epoch_stats}; if limit == 0 { return Ok(vec![]); } - let commit_block = self.get_commit_block()?; let block_ids = self.get_block_ids_between(commit_block.block_id(), block_id, 1000)?; let pks = validator_epoch_stats::table .select(validator_epoch_stats::public_key) + .left_join(evicted_nodes::table.on(evicted_nodes::public_key.eq(validator_epoch_stats::public_key))) .filter( - // Must be suspended - validator_epoch_stats::public_key.eq_any( - suspended_nodes::table.select(suspended_nodes::public_key).filter( - suspended_nodes::resumed_in_block - .ne_all(block_ids) - .or(suspended_nodes::resumed_in_block_height.is_null()) - .or(suspended_nodes::resumed_in_block_height - .lt(Some(commit_block.height().as_u64() as i64))), - ), + // Not evicted + evicted_nodes::evicted_in_block + .is_null() + // Not already evicted in uncommitted blocks + .or(evicted_nodes::evicted_in_block + .ne_all(block_ids) + // Not evicted in committed blocks + .and(evicted_nodes::evicted_in_block_height.le(commit_block.height().as_u64() as i64)) ), ) - .filter(validator_epoch_stats::missed_proposals_capped.eq(0i64)) + // Only suspended nodes can be evicted + // .filter(evicted_nodes::suspended_in_block.is_not_null()) + // Not already evicted + .filter(evicted_nodes::eviction_committed_in_epoch.is_null()) + .filter(validator_epoch_stats::missed_proposals.ge(threshold as i64)) .filter(validator_epoch_stats::epoch.eq(commit_block.epoch().as_u64() as i64)) .limit(limit as i64) .get_results::(self.connection()) .map_err(|e| SqliteStorageError::DieselError { - operation: "validator_epoch_stats_get_nodes_to_resume", + operation: "validator_epoch_stats_get_nodes_to_evict", source: e, })?; pks.iter() .map(|s| { PublicKey::from_hex(s).map_err(|e| StorageError::DecodingError { - operation: "validator_epoch_stats_get_nodes_to_resume", + operation: "validator_epoch_stats_get_nodes_to_evict", item: "public key", details: format!("Failed to decode public key: {e}"), }) @@ -2582,8 +2548,8 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor .collect() } - fn suspended_nodes_is_suspended(&self, block_id: &BlockId, public_key: &PublicKey) -> Result { - use crate::schema::suspended_nodes; + fn suspended_nodes_is_evicted(&self, block_id: &BlockId, public_key: &PublicKey) -> Result { + use crate::schema::evicted_nodes; if !self.blocks_exists(block_id)? 
{ return Err(StorageError::QueryError { @@ -2594,31 +2560,35 @@ impl<'tx, TAddr: NodeAddressable + Serialize + DeserializeOwned + 'tx> StateStor let commit_block = self.get_commit_block()?; let block_ids = self.get_block_ids_between(commit_block.block_id(), block_id, 1000)?; - let count = suspended_nodes::table + let count = evicted_nodes::table .count() - .filter(suspended_nodes::public_key.eq(public_key.to_hex())) + .filter(evicted_nodes::public_key.eq(public_key.to_hex())) .filter( - suspended_nodes::resumed_in_block - .is_null() - .or(suspended_nodes::resumed_in_block.ne_all(block_ids)), + evicted_nodes::evicted_in_block.is_not_null().and( + evicted_nodes::evicted_in_block_height + .le(commit_block.height().as_u64() as i64) + .or(evicted_nodes::evicted_in_block.ne_all(block_ids)), + ), ) .first::(self.connection()) .map_err(|e| SqliteStorageError::DieselError { - operation: "suspended_nodes_exists", + operation: "suspended_nodes_is_evicted", source: e, })?; Ok(count > 0) } - fn suspended_nodes_count(&self) -> Result { - use crate::schema::suspended_nodes; + fn evicted_nodes_count(&self, epoch: Epoch) -> Result { + use crate::schema::evicted_nodes; - let count = suspended_nodes::table + let count = evicted_nodes::table .count() + .filter(evicted_nodes::evicted_in_block.is_not_null()) + .filter(evicted_nodes::eviction_committed_in_epoch.eq(epoch.as_u64() as i64)) .first::(self.connection()) .map_err(|e| SqliteStorageError::DieselError { - operation: "suspended_nodes_exists", + operation: "evicted_nodes_count", source: e, })?; diff --git a/dan_layer/state_store_sqlite/src/schema.rs b/dan_layer/state_store_sqlite/src/schema.rs index 93febe35c..2529052e6 100644 --- a/dan_layer/state_store_sqlite/src/schema.rs +++ b/dan_layer/state_store_sqlite/src/schema.rs @@ -10,6 +10,7 @@ diesel::table! { shard -> Integer, change -> Text, state -> Nullable, + state_hash -> Nullable, created_at -> Timestamp, } } @@ -48,8 +49,8 @@ diesel::table! { diesel::table! { burnt_utxos (id) { id -> Integer, - substate_id -> Text, - substate -> Text, + commitment -> Text, + output -> Text, base_layer_block_height -> BigInt, proposed_in_block -> Nullable, proposed_in_block_height -> Nullable, @@ -110,6 +111,18 @@ diesel::table! { } } +diesel::table! { + evicted_nodes (id) { + id -> Integer, + epoch -> BigInt, + public_key -> Text, + evicted_in_block -> Nullable, + evicted_in_block_height -> Nullable, + eviction_committed_in_epoch -> Nullable, + created_at -> Timestamp, + } +} + diesel::table! { foreign_missing_transactions (id) { id -> Integer, @@ -410,19 +423,6 @@ diesel::table! { } } -diesel::table! { - suspended_nodes (id) { - id -> Integer, - epoch -> BigInt, - public_key -> Text, - suspended_in_block -> Text, - suspended_in_block_height -> BigInt, - resumed_in_block -> Nullable, - resumed_in_block_height -> Nullable, - created_at -> Timestamp, - } -} - diesel::table! { transaction_executions (id) { id -> Integer, @@ -539,7 +539,7 @@ diesel::table! { diesel::table! 
{ votes (id) { id -> Integer, - hash -> Text, + siphash -> BigInt, epoch -> BigInt, block_id -> Text, decision -> Integer, @@ -558,6 +558,7 @@ diesel::allow_tables_to_appear_in_same_query!( diagnostic_deleted_blocks, diagnostics_no_votes, epoch_checkpoints, + evicted_nodes, foreign_missing_transactions, foreign_parked_blocks, foreign_proposals, @@ -581,7 +582,6 @@ diesel::allow_tables_to_appear_in_same_query!( state_tree_shard_versions, substate_locks, substates, - suspended_nodes, transaction_executions, transaction_pool, transaction_pool_history, diff --git a/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs b/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs index 05f0f340a..a53ba59b4 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/block_diff.rs @@ -1,9 +1,12 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use std::str::FromStr; + use diesel::Queryable; use tari_dan_common_types::{shard::Shard, VersionedSubstateId}; use tari_dan_storage::{consensus_models, consensus_models::BlockId, StorageError}; +use tari_engine_types::substate::SubstateId; use time::PrimitiveDateTime; use crate::serialization::{deserialize_hex_try_from, deserialize_json}; @@ -20,6 +23,7 @@ pub struct BlockDiff { pub shard: i32, pub change: String, pub state: Option, + pub _state_hash: Option, #[allow(dead_code)] pub created_at: PrimitiveDateTime, } @@ -34,7 +38,7 @@ impl BlockDiff { } pub fn try_convert_change(d: Self) -> Result { - let substate_id = d.substate_id.parse().map_err(|err| StorageError::DataInconsistency { + let substate_id = SubstateId::from_str(&d.substate_id).map_err(|err| StorageError::DataInconsistency { details: format!("Invalid substate id {}: {}", d.substate_id, err), })?; let id = VersionedSubstateId::new(substate_id, d.version as u32); diff --git a/dan_layer/state_store_sqlite/src/sql_models/burnt_utxo.rs b/dan_layer/state_store_sqlite/src/sql_models/burnt_utxo.rs index da2aaf479..ffb0719b1 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/burnt_utxo.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/burnt_utxo.rs @@ -10,8 +10,8 @@ use crate::serialization::{deserialize_json, parse_from_string}; #[derive(Debug, Clone, Queryable)] pub struct BurntUtxo { pub id: i32, - pub substate_id: String, - pub substate_value: String, + pub commitment: String, + pub output: String, pub base_layer_block_height: i64, pub proposed_in_block: Option, pub proposed_in_block_height: Option, @@ -23,8 +23,8 @@ impl TryFrom for consensus_models::BurntUtxo { fn try_from(value: BurntUtxo) -> Result { Ok(Self { - substate_id: parse_from_string(&value.substate_id)?, - substate_value: deserialize_json(&value.substate_value)?, + commitment: parse_from_string(&value.commitment)?, + output: deserialize_json(&value.output)?, proposed_in_block: value.proposed_in_block.as_deref().map(deserialize_json).transpose()?, base_layer_block_height: value.base_layer_block_height as u64, }) diff --git a/dan_layer/state_store_sqlite/src/sql_models/vote.rs b/dan_layer/state_store_sqlite/src/sql_models/vote.rs index 360e92940..428e19775 100644 --- a/dan_layer/state_store_sqlite/src/sql_models/vote.rs +++ b/dan_layer/state_store_sqlite/src/sql_models/vote.rs @@ -14,7 +14,7 @@ use crate::{ #[derive(Debug, Clone, Queryable)] pub struct Vote { pub id: i32, - pub hash: String, + pub hash: i64, pub epoch: i64, pub block_id: String, pub decision: i32, diff --git a/dan_layer/state_store_sqlite/src/writer.rs 
b/dan_layer/state_store_sqlite/src/writer.rs index 5527c4822..80b87d819 100644 --- a/dan_layer/state_store_sqlite/src/writer.rs +++ b/dan_layer/state_store_sqlite/src/writer.rs @@ -73,7 +73,7 @@ use tari_dan_storage::{ StateStoreWriteTransaction, StorageError, }; -use tari_engine_types::substate::SubstateId; +use tari_engine_types::{substate::SubstateId, template_models::UnclaimedConfidentialOutputAddress}; use tari_state_tree::{Node, NodeKey, StaleTreeNode, TreeNode, Version}; use tari_transaction::TransactionId; use tari_utilities::{hex::Hex, ByteArray}; @@ -358,6 +358,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta block_diffs::shard.eq(ch.shard().as_u32() as i32), block_diffs::change.eq(ch.as_change_string()), block_diffs::state.eq(ch.substate().map(serialize_json).transpose()?), + block_diffs::state_hash.eq(ch.substate().map(|s| s.to_value_hash().to_string())), )) }) .collect::, StorageError>>()?; @@ -1483,7 +1484,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta use crate::schema::votes; let insert = ( - votes::hash.eq(serialize_hex(vote.calculate_hash())), + votes::siphash.eq(vote.get_hash() as i64), votes::epoch.eq(vote.epoch.as_u64() as i64), votes::block_id.eq(serialize_hex(vote.block_id)), votes::sender_leaf_hash.eq(serialize_hex(vote.sender_leaf_hash)), @@ -1999,8 +2000,8 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta use crate::schema::burnt_utxos; let values = ( - burnt_utxos::substate_id.eq(burnt_utxo.substate_id.to_string()), - burnt_utxos::substate.eq(serialize_json(&burnt_utxo.substate_value)?), + burnt_utxos::commitment.eq(burnt_utxo.commitment.to_string()), + burnt_utxos::output.eq(serialize_json(&burnt_utxo.output)?), burnt_utxos::base_layer_block_height.eq(burnt_utxo.base_layer_block_height as i64), ); @@ -2017,14 +2018,14 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta fn burnt_utxos_set_proposed_block( &mut self, - substate_id: &SubstateId, + commitment: &UnclaimedConfidentialOutputAddress, proposed_in_block: &BlockId, ) -> Result<(), StorageError> { use crate::schema::{blocks, burnt_utxos}; let proposed_in_block_hex = serialize_hex(proposed_in_block); let num_affected = diesel::update(burnt_utxos::table) - .filter(burnt_utxos::substate_id.eq(substate_id.to_string())) + .filter(burnt_utxos::commitment.eq(commitment.to_string())) .set(( burnt_utxos::proposed_in_block.eq(&proposed_in_block_hex), burnt_utxos::proposed_in_block_height.eq(blocks::table @@ -2041,7 +2042,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta if num_affected == 0 { return Err(StorageError::NotFound { item: "burnt_utxo", - key: substate_id.to_string(), + key: commitment.to_string(), }); } @@ -2067,11 +2068,11 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta Ok(()) } - fn burnt_utxos_delete(&mut self, substate_id: &SubstateId) -> Result<(), StorageError> { + fn burnt_utxos_delete(&mut self, commitment: &UnclaimedConfidentialOutputAddress) -> Result<(), StorageError> { use crate::schema::burnt_utxos; let num_affected = diesel::delete(burnt_utxos::table) - .filter(burnt_utxos::substate_id.eq(substate_id.to_string())) + .filter(burnt_utxos::commitment.eq(commitment.to_string())) .execute(self.connection()) .map_err(|e| SqliteStorageError::DieselError { operation: "burnt_utxos_delete", @@ -2081,7 +2082,7 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta 
if num_affected == 0 { return Err(StorageError::NotFound { item: "burnt_utxo", - key: substate_id.to_string(), + key: commitment.to_string(), }); } @@ -2279,60 +2280,41 @@ impl<'tx, TAddr: NodeAddressable + 'tx> StateStoreWriteTransaction for SqliteSta Ok(()) } - fn suspended_nodes_insert( - &mut self, - public_key: &PublicKey, - suspended_in_block: BlockId, - ) -> Result<(), StorageError> { - let suspended_in_block = serialize_hex(suspended_in_block); + fn evicted_nodes_evict(&mut self, public_key: &PublicKey, evicted_in_block: BlockId) -> Result<(), StorageError> { + if !self.blocks_exists(&evicted_in_block)? { + return Err(StorageError::QueryError { + reason: format!("suspended_nodes_evict: block {evicted_in_block} does not exist"), + }); + } + let evicted_in_block = serialize_hex(evicted_in_block); + sql_query( r#" INSERT INTO - suspended_nodes (public_key, epoch, suspended_in_block, suspended_in_block_height) + evicted_nodes (public_key, epoch, evicted_in_block, evicted_in_block_height) SELECT ?, epoch, block_id, height FROM blocks where block_id = ?"#, ) .bind::(public_key.to_hex()) - .bind::(suspended_in_block) + .bind::(evicted_in_block) .execute(self.connection()) .map_err(|e| SqliteStorageError::DieselError { - operation: "suspended_nodes_insert", + operation: "suspended_nodes_evict", source: e, })?; Ok(()) } - fn suspended_nodes_mark_for_removal( + fn evicted_nodes_mark_eviction_as_committed( &mut self, public_key: &PublicKey, - resumed_in_block: BlockId, + epoch: Epoch, ) -> Result<(), StorageError> { - use crate::schema::{blocks, suspended_nodes}; - let resumed_in_block = serialize_hex(resumed_in_block); - - diesel::update(suspended_nodes::table) - .set(( - suspended_nodes::resumed_in_block.eq(&resumed_in_block), - suspended_nodes::resumed_in_block_height.eq(blocks::table - .select(blocks::height) - .filter(blocks::block_id.eq(&resumed_in_block)) - .single_value()), - )) - .filter(suspended_nodes::public_key.eq(public_key.to_hex())) - .execute(self.connection()) - .map_err(|e| SqliteStorageError::DieselError { - operation: "suspended_nodes_mark_for_removal", - source: e, - })?; - - Ok(()) - } - - fn suspended_nodes_delete(&mut self, public_key: &PublicKey) -> Result<(), StorageError> { - use crate::schema::suspended_nodes; + use crate::schema::evicted_nodes; - let num_affected = diesel::delete(suspended_nodes::table) - .filter(suspended_nodes::public_key.eq(public_key.to_hex())) + let num_affected = diesel::update(evicted_nodes::table) + .set(evicted_nodes::eviction_committed_in_epoch.eq(epoch.as_u64() as i64)) + .filter(evicted_nodes::public_key.eq(public_key.to_hex())) .execute(self.connection()) .map_err(|e| SqliteStorageError::DieselError { operation: "suspended_nodes_delete", diff --git a/dan_layer/state_tree/Cargo.toml b/dan_layer/state_tree/Cargo.toml index 01a273ab3..5423c461f 100644 --- a/dan_layer/state_tree/Cargo.toml +++ b/dan_layer/state_tree/Cargo.toml @@ -12,8 +12,9 @@ tari_engine_types = { workspace = true } tari_template_lib = { workspace = true } tari_common_types = { workspace = true } tari_crypto = { workspace = true } +tari_bor = { workspace = true } -hex = { workspace = true } +blake2 = { workspace = true } thiserror = { workspace = true } serde = { workspace = true, features = ["derive"] } log = { workspace = true } diff --git a/dan_layer/state_tree/src/jellyfish/tree.rs b/dan_layer/state_tree/src/jellyfish/tree.rs index ef386c958..188da6636 100644 --- a/dan_layer/state_tree/src/jellyfish/tree.rs +++ b/dan_layer/state_tree/src/jellyfish/tree.rs @@ 
-265,10 +265,10 @@ impl<'a, R: 'a + TreeStoreReader<P>, P: Clone> JellyfishMerkleTree<'a, R, P> { // Reuse the current `InternalNode` in memory to create a new internal node. let mut old_children = internal_node.into_children(); - let mut new_created_children: HashMap<Nibble, Node<P>> = HashMap::new(); + let mut new_created_children: Vec<(Nibble, Node<P>
)> = Vec::new(); for (child_nibble, child_option) in new_children { if let Some(child) = child_option { - new_created_children.insert(child_nibble, child); + new_created_children.push((child_nibble, child)); } else { old_children.swap_remove(&child_nibble); } @@ -278,7 +278,7 @@ impl<'a, R: 'a + TreeStoreReader<P>
, P: Clone> JellyfishMerkleTree<'a, R, P> { return Ok(None); } if old_children.len() <= 1 && new_created_children.len() <= 1 { - if let Some((new_nibble, new_child)) = new_created_children.iter().next() { + if let Some((new_nibble, new_child)) = new_created_children.first() { if let Some((old_nibble, _old_child)) = old_children.iter().next() { if old_nibble == new_nibble && new_child.is_leaf() { return Ok(Some(new_child.clone())); diff --git a/dan_layer/state_tree/src/jellyfish/types.rs b/dan_layer/state_tree/src/jellyfish/types.rs index 3df342a78..12a41db04 100644 --- a/dan_layer/state_tree/src/jellyfish/types.rs +++ b/dan_layer/state_tree/src/jellyfish/types.rs @@ -81,33 +81,56 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 -use std::{fmt, ops::Range}; +use std::{fmt, io, ops::Range}; +use blake2::{digest::consts::U32, Blake2b}; use indexmap::IndexMap; use serde::{Deserialize, Serialize}; -use tari_crypto::{hash_domain, tari_utilities::ByteArray}; -use tari_dan_common_types::{ - hasher::{tari_hasher, TariHasher}, - optional::IsNotFoundError, +use tari_crypto::{ + hash_domain, + hashing::{AsFixedBytes, DomainSeparatedHasher}, + tari_utilities::ByteArray, }; +use tari_dan_common_types::optional::IsNotFoundError; use tari_engine_types::serde_with; use crate::jellyfish::store::TreeStoreReader; pub type Hash = tari_common_types::types::FixedHash; -hash_domain!(SparseMerkleTree, "com.tari.dan.state_tree", 0); +hash_domain!(ValidatorJmtHashDomain, "com.tari.jmt", 0); -fn jmt_node_hasher() -> TariHasher { - tari_hasher::("JmtNode") +pub type JmtHasher = DomainSeparatedHasher, ValidatorJmtHashDomain>; + +fn jmt_node_hasher() -> JmtHasher { + JmtHasher::new_with_label("Node") +} + +struct HashWriter<'a>(&'a mut JmtHasher); + +impl io::Write for HashWriter<'_> { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.update(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } } pub fn jmt_node_hash(data: &T) -> Hash { - jmt_node_hasher().chain(data).result() + let mut hasher = jmt_node_hasher(); + let mut hash_writer = HashWriter(&mut hasher); + tari_bor::encode_into_std_writer(data, &mut hash_writer).expect("encoding failed"); + let bytes: [u8; 32] = hasher.finalize().as_fixed_bytes().expect("hash is 32 bytes"); + bytes.into() } pub fn jmt_node_hash2(d1: &[u8], d2: &[u8]) -> Hash { - jmt_node_hasher().chain(d1).chain(d2).result() + let hasher = jmt_node_hasher().chain(d1).chain(d2); + let bytes: [u8; 32] = hasher.finalize().as_fixed_bytes().expect("hash is 32 bytes"); + bytes.into() } // SOURCE: https://github.com/aptos-labs/aptos-core/blob/1.0.4/types/src/proof/definition.rs#L182 @@ -419,7 +442,6 @@ pub struct NibblePath { num_nibbles: usize, /// The underlying bytes that stores the path, 2 nibbles per byte. If the number of nibbles is /// odd, the second half of the last byte must be 0. - #[serde(with = "serde_with::hex")] bytes: Vec, } @@ -1115,7 +1137,7 @@ pub struct LeafNode

{ version: Version, } -impl LeafNode

{ +impl

LeafNode

{
     /// Creates a new leaf node.
     pub fn new(leaf_key: LeafKey, value_hash: Hash, payload: P, version: Version) -> Self {
         Self {
@@ -1184,7 +1206,7 @@ impl<P> From<LeafNode<P>> for Node<P>
{ } } -impl Node

{ +impl

Node

{
     // /// Creates the [`Internal`](Node::Internal) variant.
     // #[cfg(any(test, feature = "fuzzing"))]
     // pub fn new_internal(children: Children) -> Self {
@@ -1201,6 +1223,13 @@ impl Node
{
         matches!(self, Node::Leaf(_))
     }
 
+    pub fn leaf(&self) -> Option<&LeafNode<P>> {
+        match self {
+            Node::Leaf(leaf) => Some(leaf),
+            _ => None,
+        }
+    }
+
     /// Returns `NodeType`
     pub fn node_type(&self) -> NodeType {
         match self {
diff --git a/dan_layer/state_tree/src/staged_store.rs b/dan_layer/state_tree/src/staged_store.rs
index fd7e91475..addcfd4c6 100644
--- a/dan_layer/state_tree/src/staged_store.rs
+++ b/dan_layer/state_tree/src/staged_store.rs
@@ -4,6 +4,7 @@
 use std::collections::{HashMap, VecDeque};
 
 use log::debug;
+use tari_dan_common_types::option::DisplayContainer;
 
 use crate::{JmtStorageError, Node, NodeKey, StaleTreeNode, StateHashTreeDiff, TreeStoreReader, TreeStoreWriter};
 
@@ -29,7 +30,7 @@ impl<'s, S: TreeStoreReader<P>, P> StagedTreeStore<'s, S, P> {
     pub fn apply_pending_diff(&mut self, diff: StateHashTreeDiff<P>
) { self.preceding_pending_state.reserve(diff.new_nodes.len()); for (key, node) in diff.new_nodes { - debug!(target: LOG_TARGET, "PENDING INSERT: node {}", key); + debug!(target: LOG_TARGET, "PENDING INSERT: node {} leaf: {}", key, node.leaf().map(|l| l.value_hash()).display()); self.preceding_pending_state.insert(key, node); } diff --git a/dan_layer/storage/Cargo.toml b/dan_layer/storage/Cargo.toml index b9bb07e01..9fed74a38 100644 --- a/dan_layer/storage/Cargo.toml +++ b/dan_layer/storage/Cargo.toml @@ -12,15 +12,18 @@ license.workspace = true tari_common = { workspace = true } tari_common_types = { workspace = true } tari_dan_common_types = { workspace = true } +tari_hashing = { workspace = true } +tari_template_lib = { workspace = true } +tari_sidechain = { workspace = true } # Shard store deps tari_engine_types = { workspace = true } tari_transaction = { workspace = true } -tari_core = { workspace = true, default-features = true } tari_crypto = { workspace = true } tari_state_tree = { workspace = true } anyhow = { workspace = true } +borsh = { workspace = true } chrono = { workspace = true } indexmap = { workspace = true, features = ["serde"] } log = { workspace = true } diff --git a/dan_layer/storage/src/consensus_models/block.rs b/dan_layer/storage/src/consensus_models/block.rs index 185fa98ba..e6619b749 100644 --- a/dan_layer/storage/src/consensus_models/block.rs +++ b/dan_layer/storage/src/consensus_models/block.rs @@ -2,12 +2,13 @@ // SPDX-License-Identifier: BSD-3-Clause use std::{ - collections::{BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet, HashSet}, fmt::{Debug, Display, Formatter}, iter, ops::{Deref, RangeInclusive}, }; +use borsh::BorshSerialize; use indexmap::IndexMap; use log::*; use serde::{Deserialize, Serialize}; @@ -36,6 +37,7 @@ use ts_rs::TS; use super::{ BlockDiff, BlockPledge, + EvictNodeAtom, ForeignProposal, ForeignProposalAtom, ForeignSendCounters, @@ -43,7 +45,6 @@ use super::{ MintConfidentialOutputAtom, PendingShardStateTreeDiff, QuorumCertificate, - ResumeNodeAtom, SubstateChange, SubstateDestroyedProof, SubstatePledge, @@ -54,7 +55,7 @@ use super::{ }; use crate::{ consensus_models::{ - block_header::{compute_command_merkle_root, BlockHeader}, + block_header::BlockHeader, Command, LastExecuted, LastProposed, @@ -113,7 +114,7 @@ impl Block { commands: BTreeSet, state_merkle_root: FixedHash, total_leader_fee: u64, - sorted_foreign_indexes: IndexMap, + sorted_foreign_indexes: BTreeMap, signature: Option, timestamp: u64, base_layer_block_height: u64, @@ -170,7 +171,7 @@ impl Block { is_dummy: bool, is_justified: bool, is_committed: bool, - sorted_foreign_indexes: IndexMap, + sorted_foreign_indexes: BTreeMap, signature: Option, created_at: PrimitiveDateTime, block_time: Option, @@ -240,7 +241,7 @@ impl Block { Default::default(), state_merkle_root, 0, - IndexMap::new(), + BTreeMap::new(), None, 0, 0, @@ -263,8 +264,8 @@ impl Block { } } - pub fn calculate_hash(&self) -> FixedHash { - self.header.calculate_hash() + pub fn calculate_id(&self) -> BlockId { + self.header.calculate_id() } pub fn header(&self) -> &BlockHeader { @@ -310,8 +311,8 @@ impl Block { self.commands.iter().filter_map(|c| c.foreign_proposal()) } - pub fn all_resume_nodes(&self) -> impl Iterator + '_ { - self.commands.iter().filter_map(|c| c.resume_node()) + pub fn all_evict_nodes(&self) -> impl Iterator + '_ { + self.commands.iter().filter_map(|c| c.evict_node()) } pub fn all_confidential_output_mints(&self) -> impl Iterator + '_ { @@ -406,10 +407,6 @@ impl Block { 
self.header.command_merkle_root() } - pub fn compute_command_merkle_root(&self) -> Result { - compute_command_merkle_root(&self.commands) - } - pub fn commands(&self) -> &BTreeSet { &self.commands } @@ -434,7 +431,7 @@ impl Block { self.header.get_foreign_counter(shard) } - pub fn foreign_indexes(&self) -> &IndexMap { + pub fn foreign_indexes(&self) -> &BTreeMap { self.header.foreign_indexes() } @@ -450,10 +447,6 @@ impl Block { self.header.signature() } - pub fn set_signature(&mut self, signature: ValidatorSchnorrSignature) { - self.header.set_signature(signature); - } - pub fn base_layer_block_height(&self) -> u64 { self.header.base_layer_block_height() } @@ -833,7 +826,7 @@ impl Block { TTx: StateStoreWriteTransaction + Deref, TTx::Target: StateStoreReadTransaction, TFnOnLock: FnMut(&mut TTx, &LockedBlock, &Block, &QuorumCertificate) -> Result<(), E>, - TFnOnCommit: FnMut(&mut TTx, &LastExecuted, &Block) -> Result<(), E>, + TFnOnCommit: FnMut(&mut TTx, &LastExecuted, Block) -> Result<(), E>, E: From, { let high_qc = self.justify().update_high_qc(tx)?; @@ -842,34 +835,34 @@ impl Block { let justified_node = self.justify().get_block(&**tx)?; // b' <- b''.justify.node - let prepared_node = justified_node.justify().get_block(&**tx)?; + let new_locked = justified_node.justify().get_block(&**tx)?; - if prepared_node.is_genesis() { + if new_locked.is_genesis() { return Ok(high_qc); } let current_locked = LockedBlock::get(&**tx, self.epoch())?; - if prepared_node.height() > current_locked.height { + if new_locked.height() > current_locked.height { on_locked_block_recurse( tx, ¤t_locked, - &prepared_node, + &new_locked, justified_node.justify(), &mut on_lock_block, )?; - prepared_node.as_locked_block().set(tx)?; + new_locked.as_locked_block().set(tx)?; } // b <- b'.justify.node - let commit_node = prepared_node.justify().block_id(); - if justified_node.parent() == prepared_node.id() && prepared_node.parent() == commit_node { + let commit_node = new_locked.justify().block_id(); + if justified_node.parent() == new_locked.id() && new_locked.parent() == commit_node { debug!( target: LOG_TARGET, "✅ Block {} {} forms a 3-chain b'' = {}, b' = {}, b = {}", self.height(), self.id(), justified_node.id(), - prepared_node.id(), + new_locked.id(), commit_node, ); @@ -879,8 +872,9 @@ impl Block { } let prepare_node = Block::get(&**tx, commit_node)?; let last_executed = LastExecuted::get(&**tx)?; - on_commit_block_recurse(tx, &last_executed, &prepare_node, &mut on_commit)?; - prepare_node.as_last_executed().set(tx)?; + let last_exec = prepare_node.as_last_executed(); + on_commit_block_recurse(tx, &last_executed, prepare_node, &mut on_commit)?; + last_exec.set(tx)?; } else { debug!( target: LOG_TARGET, @@ -888,7 +882,7 @@ impl Block { self.height(), self.id(), justified_node.id(), - prepared_node.id(), + new_locked.id(), commit_node, self.id() ); @@ -1041,7 +1035,7 @@ impl Display for Block { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, BorshSerialize)] #[serde(transparent)] pub struct BlockId(#[serde(with = "serde_with::hex")] FixedHash); @@ -1087,6 +1081,12 @@ impl From for BlockId { } } +impl From<[u8; 32]> for BlockId { + fn from(value: [u8; 32]) -> Self { + Self(value.into()) + } +} + impl TryFrom> for BlockId { type Error = FixedHashSizeError; @@ -1133,19 +1133,19 @@ where fn on_commit_block_recurse( tx: &mut TTx, last_executed: &LastExecuted, - block: &Block, + 
block: Block, callback: &mut F, ) -> Result<(), E> where TTx: StateStoreWriteTransaction + Deref, TTx::Target: StateStoreReadTransaction, E: From, - F: FnMut(&mut TTx, &LastExecuted, &Block) -> Result<(), E>, + F: FnMut(&mut TTx, &LastExecuted, Block) -> Result<(), E>, { if last_executed.height < block.height() { let parent = block.get_parent(&**tx)?; - // Recurse to "catch up" any parent parent blocks we may not have executed - on_commit_block_recurse(tx, last_executed, &parent, callback)?; + // Recurse to "catch up" any parent blocks we may not have executed + on_commit_block_recurse(tx, last_executed, parent, callback)?; callback(tx, last_executed, block)?; } Ok(()) diff --git a/dan_layer/storage/src/consensus_models/block_header.rs b/dan_layer/storage/src/consensus_models/block_header.rs index 62cf952d8..d4215500b 100644 --- a/dan_layer/storage/src/consensus_models/block_header.rs +++ b/dan_layer/storage/src/consensus_models/block_header.rs @@ -2,17 +2,16 @@ // SPDX-License-Identifier: BSD-3-Clause use std::{ - collections::BTreeSet, + collections::{BTreeMap, BTreeSet}, fmt::{Debug, Display, Formatter}, }; -use indexmap::IndexMap; use serde::{Deserialize, Serialize}; use tari_common::configuration::Network; use tari_common_types::types::{FixedHash, PublicKey}; use tari_crypto::tari_utilities::epoch_time::EpochTime; use tari_dan_common_types::{hashing, shard::Shard, Epoch, ExtraData, NodeHeight, NumPreshards, ShardGroup}; -use tari_state_tree::{compute_merkle_root_for_hashes, StateTreeError}; +use tari_state_tree::compute_merkle_root_for_hashes; #[cfg(feature = "ts")] use ts_rs::TS; @@ -41,11 +40,10 @@ pub struct BlockHeader { state_merkle_root: FixedHash, #[cfg_attr(feature = "ts", ts(type = "string"))] command_merkle_root: FixedHash, - /// If the block is a dummy block. This is metadata and not sent over - /// the wire or part of the block hash. + /// If the block is a dummy block. is_dummy: bool, /// Counter for each foreign shard for reliable broadcast. - foreign_indexes: IndexMap, + foreign_indexes: BTreeMap, /// Signature of block by the proposer. 
#[cfg_attr(feature = "ts", ts(type = "{public_nonce : string, signature: string} | null"))] signature: Option, @@ -71,14 +69,14 @@ impl BlockHeader { state_merkle_root: FixedHash, commands: &BTreeSet, total_leader_fee: u64, - sorted_foreign_indexes: IndexMap, + sorted_foreign_indexes: BTreeMap, signature: Option, timestamp: u64, base_layer_block_height: u64, base_layer_block_hash: FixedHash, extra_data: ExtraData, ) -> Result { - let command_merkle_root = compute_command_merkle_root(commands)?; + let command_merkle_root = Self::compute_command_merkle_root(commands)?; let mut header = BlockHeader { id: BlockId::zero(), network, @@ -99,11 +97,16 @@ impl BlockHeader { base_layer_block_hash, extra_data, }; - header.id = header.calculate_hash().into(); + header.id = header.calculate_id(); Ok(header) } + pub fn compute_command_merkle_root(commands: &BTreeSet) -> Result { + let hashes = commands.iter().map(|cmd| cmd.hash()).peekable(); + compute_merkle_root_for_hashes(hashes).map_err(BlockError::StateTreeError) + } + #[allow(clippy::too_many_arguments)] pub fn load( id: BlockId, @@ -117,7 +120,7 @@ impl BlockHeader { state_merkle_root: FixedHash, total_leader_fee: u64, is_dummy: bool, - sorted_foreign_indexes: IndexMap, + sorted_foreign_indexes: BTreeMap, signature: Option, timestamp: u64, base_layer_block_height: u64, @@ -162,7 +165,7 @@ impl BlockHeader { command_merkle_root: FixedHash::zero(), total_leader_fee: 0, is_dummy: false, - foreign_indexes: IndexMap::new(), + foreign_indexes: BTreeMap::new(), signature: None, timestamp: EpochTime::now().as_u64(), base_layer_block_height: 0, @@ -194,22 +197,22 @@ impl BlockHeader { shard_group, proposed_by, state_merkle_root: parent_state_merkle_root, - command_merkle_root: compute_command_merkle_root([].into_iter().peekable()) + command_merkle_root: BlockHeader::compute_command_merkle_root(&BTreeSet::new()) .expect("compute_command_merkle_root is infallible for empty commands"), total_leader_fee: 0, is_dummy: true, - foreign_indexes: IndexMap::new(), + foreign_indexes: BTreeMap::new(), signature: None, timestamp: parent_timestamp, base_layer_block_height: parent_base_layer_block_height, base_layer_block_hash: parent_base_layer_block_hash, extra_data: ExtraData::new(), }; - block.id = block.calculate_hash().into(); + block.id = block.calculate_id(); block } - pub fn calculate_hash(&self) -> FixedHash { + pub fn calculate_id(&self) -> BlockId { // Hash is created from the hash of the "body" and // then hashed with the parent, so that you can // create a merkle proof of a chain of blocks @@ -223,8 +226,41 @@ impl BlockHeader { // blockbody // ``` - let inner_hash = hashing::block_hasher() - .chain(&self.network) + let header_hash = self.calculate_hash(); + Self::calculate_block_id(&header_hash, &self.parent) + } + + pub(crate) fn calculate_block_id(contents_hash: &FixedHash, parent_id: &BlockId) -> BlockId { + if *contents_hash == FixedHash::zero() && parent_id.is_zero() { + return BlockId::zero(); + } + + hashing::block_hasher() + .chain(parent_id) + .chain(contents_hash) + .finalize_into_array() + .into() + } + + pub fn create_extra_data_hash(&self) -> FixedHash { + hashing::extra_data_hasher().chain(&self.extra_data).finalize().into() + } + + pub fn create_foreign_indexes_hash(&self) -> FixedHash { + hashing::foreign_indexes_hasher() + .chain(&self.foreign_indexes) + .finalize() + .into() + } + + pub fn calculate_hash(&self) -> FixedHash { + // These hashes reduce proof sizes, specifically, a proof-of-commit only needs to include these hashes and not 
+ // their data. + let extra_data_hash = self.create_extra_data_hash(); + let foreign_indexes_hash = self.create_foreign_indexes_hash(); + + hashing::block_hasher() + .chain(&self.network.as_byte()) .chain(&self.justify_id) .chain(&self.height) .chain(&self.total_leader_fee) @@ -234,14 +270,13 @@ impl BlockHeader { .chain(&self.state_merkle_root) .chain(&self.is_dummy) .chain(&self.command_merkle_root) - .chain(&self.foreign_indexes) + .chain(&foreign_indexes_hash) .chain(&self.timestamp) .chain(&self.base_layer_block_height) .chain(&self.base_layer_block_hash) - .chain(&self.extra_data) - .result(); - - hashing::block_hasher().chain(&self.parent).chain(&inner_hash).result() + .chain(&extra_data_hash) + .finalize() + .into() } pub fn is_genesis(&self) -> bool { @@ -344,7 +379,7 @@ impl BlockHeader { self.foreign_indexes.get(bucket).copied() } - pub fn foreign_indexes(&self) -> &IndexMap { + pub fn foreign_indexes(&self) -> &BTreeMap { &self.foreign_indexes } @@ -389,10 +424,3 @@ impl Display for BlockHeader { ) } } - -pub(crate) fn compute_command_merkle_root<'a, I: IntoIterator>( - commands: I, -) -> Result { - let hashes = commands.into_iter().map(|cmd| cmd.hash()).peekable(); - compute_merkle_root_for_hashes(hashes) -} diff --git a/dan_layer/storage/src/consensus_models/burnt_utxo.rs b/dan_layer/storage/src/consensus_models/burnt_utxo.rs index 219b131ae..40e4d7eb2 100644 --- a/dan_layer/storage/src/consensus_models/burnt_utxo.rs +++ b/dan_layer/storage/src/consensus_models/burnt_utxo.rs @@ -1,26 +1,32 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::hash::Hash; +use std::{hash::Hash, io::Write}; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; -use tari_engine_types::substate::{SubstateId, SubstateValue}; +use tari_engine_types::confidential::UnclaimedConfidentialOutput; +use tari_template_lib::models::UnclaimedConfidentialOutputAddress; use crate::{consensus_models::BlockId, StateStoreReadTransaction, StateStoreWriteTransaction, StorageError}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BurntUtxo { - pub substate_id: SubstateId, - pub substate_value: SubstateValue, + pub commitment: UnclaimedConfidentialOutputAddress, + pub output: UnclaimedConfidentialOutput, pub proposed_in_block: Option, pub base_layer_block_height: u64, } impl BurntUtxo { - pub fn new(substate_id: SubstateId, substate_value: SubstateValue, base_layer_block_height: u64) -> Self { + pub fn new( + commitment: UnclaimedConfidentialOutputAddress, + output: UnclaimedConfidentialOutput, + base_layer_block_height: u64, + ) -> Self { Self { - substate_id, - substate_value, + commitment, + output, proposed_in_block: None, base_layer_block_height, } @@ -28,7 +34,7 @@ impl BurntUtxo { pub fn to_atom(&self) -> MintConfidentialOutputAtom { MintConfidentialOutputAtom { - substate_id: self.substate_id.clone(), + commitment: self.commitment, } } } @@ -40,10 +46,10 @@ impl BurntUtxo { pub fn set_proposed_in_block( tx: &mut TTx, - substate_id: &SubstateId, + commitment: &UnclaimedConfidentialOutputAddress, proposed_in_block: &BlockId, ) -> Result<(), StorageError> { - tx.burnt_utxos_set_proposed_block(substate_id, proposed_in_block)?; + tx.burnt_utxos_set_proposed_block(commitment, proposed_in_block)?; Ok(()) } @@ -67,15 +73,21 @@ impl BurntUtxo { ts(export, export_to = "../../bindings/src/types/") )] pub struct MintConfidentialOutputAtom { - pub substate_id: SubstateId, + pub commitment: UnclaimedConfidentialOutputAddress, } impl MintConfidentialOutputAtom { pub 
fn get(&self, tx: &TTx) -> Result { - tx.burnt_utxos_get(&self.substate_id) + tx.burnt_utxos_get(&self.commitment) } pub fn delete(&self, tx: &mut TTx) -> Result<(), StorageError> { - tx.burnt_utxos_delete(&self.substate_id) + tx.burnt_utxos_delete(&self.commitment) + } +} + +impl BorshSerialize for MintConfidentialOutputAtom { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + BorshSerialize::serialize(&self.commitment.as_object_key().into_array(), writer) } } diff --git a/dan_layer/storage/src/consensus_models/command.rs b/dan_layer/storage/src/consensus_models/command.rs index 74f90a00d..cc5bdeb5b 100644 --- a/dan_layer/storage/src/consensus_models/command.rs +++ b/dan_layer/storage/src/consensus_models/command.rs @@ -6,10 +6,11 @@ use std::{ fmt::{Display, Formatter}, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use tari_common_types::types::{FixedHash, PublicKey}; -use tari_dan_common_types::{hashing::command_hasher, ShardGroup}; -use tari_engine_types::substate::SubstateId; +use tari_dan_common_types::{hashing::command_hasher, Epoch, ShardGroup}; +use tari_template_lib::models::UnclaimedConfidentialOutputAddress; use tari_transaction::TransactionId; use super::{ @@ -28,7 +29,7 @@ use crate::{ StorageError, }; -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), @@ -84,7 +85,7 @@ impl Display for TransactionAtom { } } -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), @@ -112,21 +113,18 @@ pub enum Command { // Validator node commands ForeignProposal(ForeignProposalAtom), MintConfidentialOutput(MintConfidentialOutputAtom), - SuspendNode(SuspendNodeAtom), - ResumeNode(ResumeNodeAtom), - // EvictNode(EvictNodeAtom), + EvictNode(EvictNodeAtom), EndEpoch, } /// Defines the order in which commands should be processed in a block. "Smallest" comes first and "largest" comes last. 
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] enum CommandOrdering<'a> { - ResumeNode, - SuspendNode, + EvictNode, // EvictNode, /// Foreign proposals should come first in the block so that they are processed before commands ForeignProposal(ShardGroup, &'a BlockId), - MintConfidentialOutput(&'a SubstateId), + MintConfidentialOutput(&'a UnclaimedConfidentialOutputAddress), TransactionId(&'a TransactionId), EndEpoch, } @@ -144,8 +142,7 @@ impl Command { Command::LocalOnly(tx) => Some(tx), Command::ForeignProposal(_) | Command::MintConfidentialOutput(_) | - Command::SuspendNode(_) | - Command::ResumeNode(_) | + Command::EvictNode(_) | Command::EndEpoch => None, } } @@ -164,15 +161,14 @@ impl Command { // Order by shard group then by block id CommandOrdering::ForeignProposal(foreign_proposal.shard_group, &foreign_proposal.block_id) }, - Command::MintConfidentialOutput(mint) => CommandOrdering::MintConfidentialOutput(&mint.substate_id), - Command::SuspendNode(_) => CommandOrdering::SuspendNode, - Command::ResumeNode(_) => CommandOrdering::ResumeNode, + Command::MintConfidentialOutput(mint) => CommandOrdering::MintConfidentialOutput(&mint.commitment), + Command::EvictNode(_) => CommandOrdering::EvictNode, Command::EndEpoch => CommandOrdering::EndEpoch, } } pub fn hash(&self) -> FixedHash { - command_hasher().chain(self).result() + command_hasher().chain(self).finalize().into() } pub fn local_only(&self) -> Option<&TransactionAtom> { @@ -217,9 +213,9 @@ impl Command { } } - pub fn resume_node(&self) -> Option<&ResumeNodeAtom> { + pub fn evict_node(&self) -> Option<&EvictNodeAtom> { match self { - Command::ResumeNode(atom) => Some(atom), + Command::EvictNode(atom) => Some(atom), _ => None, } } @@ -300,9 +296,8 @@ impl Display for Command { Command::AllAccept(tx) => write!(f, "AllAccept({}, {})", tx.id, tx.decision), Command::SomeAccept(tx) => write!(f, "SomeAccept({}, {})", tx.id, tx.decision), Command::ForeignProposal(fp) => write!(f, "ForeignProposal {}", fp.block_id), - Command::MintConfidentialOutput(mint) => write!(f, "MintConfidentialOutput({})", mint.substate_id), - Command::SuspendNode(atom) => write!(f, "SuspendNode({atom})"), - Command::ResumeNode(atom) => write!(f, "ResumeNode({atom})"), + Command::MintConfidentialOutput(mint) => write!(f, "MintConfidentialOutput({})", mint.commitment), + Command::EvictNode(atom) => write!(f, "EvictNode({atom})"), Command::EndEpoch => write!(f, "EndEpoch"), } } @@ -313,54 +308,19 @@ impl Display for Command { derive(ts_rs::TS), ts(export, export_to = "../../bindings/src/types/") )] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SuspendNodeAtom { - #[cfg_attr(feature = "ts", ts(type = "string"))] - pub public_key: PublicKey, -} - -impl Display for SuspendNodeAtom { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.public_key) - } -} - -#[cfg_attr( - feature = "ts", - derive(ts_rs::TS), - ts(export, export_to = "../../bindings/src/types/") -)] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct ResumeNodeAtom { - #[cfg_attr(feature = "ts", ts(type = "string"))] - pub public_key: PublicKey, -} - -impl crate::consensus_models::ResumeNodeAtom { - pub fn delete_suspended_node(&self, tx: &mut TTx) -> Result<(), StorageError> { - tx.suspended_nodes_delete(&self.public_key) - } -} - -impl Display for crate::consensus_models::ResumeNodeAtom { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.public_key) - } -} -#[cfg_attr( - feature = "ts", 
- derive(ts_rs::TS), - ts(export, export_to = "../../bindings/src/types/") -)] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize)] pub struct EvictNodeAtom { #[cfg_attr(feature = "ts", ts(type = "string"))] pub public_key: PublicKey, } impl EvictNodeAtom { - pub fn delete_suspended_node(&self, tx: &mut TTx) -> Result<(), StorageError> { - tx.suspended_nodes_delete(&self.public_key) + pub fn mark_as_committed_in_epoch( + &self, + tx: &mut TTx, + epoch: Epoch, + ) -> Result<(), StorageError> { + tx.evicted_nodes_mark_eviction_as_committed(&self.public_key, epoch) } } @@ -374,6 +334,8 @@ impl Display for EvictNodeAtom { mod tests { use std::{collections::BTreeSet, str::FromStr}; + use tari_template_lib::models::UnclaimedConfidentialOutputAddress; + use super::*; #[test] @@ -386,18 +348,20 @@ mod tests { CommandOrdering::ForeignProposal(ShardGroup::new(0, 64), &BlockId::zero()) < CommandOrdering::TransactionId(&TransactionId::default()) ); - let substate_id = - SubstateId::from_str("component_0000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let commitment = UnclaimedConfidentialOutputAddress::from_str( + "commitment_0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); assert!( - CommandOrdering::MintConfidentialOutput(&substate_id) < + CommandOrdering::MintConfidentialOutput(&commitment) < CommandOrdering::TransactionId(&TransactionId::default()) ); - assert!(CommandOrdering::MintConfidentialOutput(&substate_id) < CommandOrdering::EndEpoch); + assert!(CommandOrdering::MintConfidentialOutput(&commitment) < CommandOrdering::EndEpoch); let mut set = BTreeSet::new(); let cmds = [ Command::EndEpoch, - Command::MintConfidentialOutput(MintConfidentialOutputAtom { substate_id }), + Command::MintConfidentialOutput(MintConfidentialOutputAtom { commitment }), Command::ForeignProposal(ForeignProposalAtom { block_id: BlockId::zero(), shard_group: ShardGroup::new(0, 64), diff --git a/dan_layer/storage/src/consensus_models/evidence.rs b/dan_layer/storage/src/consensus_models/evidence.rs index 670fd51e8..6ac8b234a 100644 --- a/dan_layer/storage/src/consensus_models/evidence.rs +++ b/dan_layer/storage/src/consensus_models/evidence.rs @@ -3,10 +3,12 @@ use std::fmt::{Display, Formatter}; +use borsh::BorshSerialize; use indexmap::IndexMap; use log::*; use serde::{Deserialize, Serialize}; use tari_dan_common_types::{ + borsh::indexmap as indexmap_borsh, committee::CommitteeInfo, NumPreshards, ShardGroup, @@ -20,7 +22,7 @@ use crate::consensus_models::{QcId, VersionedSubstateIdLockIntent}; const LOG_TARGET: &str = "tari::dan::consensus_models::evidence"; -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), @@ -30,6 +32,7 @@ pub struct Evidence { // Serialize JSON as an array of objects since ShardGroup is a non-string key #[serde(with = "serde_with::vec")] #[cfg_attr(feature = "ts", ts(type = "Array<[any, any]>"))] + #[borsh(serialize_with = "indexmap_borsh::serialize")] evidence: IndexMap, } @@ -199,13 +202,14 @@ impl Display for Evidence { } } -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), ts(export, export_to = "../../bindings/src/types/") )] pub 
struct ShardGroupEvidence { + #[borsh(serialize_with = "indexmap_borsh::serialize")] substates: IndexMap, #[cfg_attr(feature = "ts", ts(type = "string | null"))] prepare_qc: Option, diff --git a/dan_layer/storage/src/consensus_models/foreign_proposal.rs b/dan_layer/storage/src/consensus_models/foreign_proposal.rs index 4d80f6b3e..af80cd1a6 100644 --- a/dan_layer/storage/src/consensus_models/foreign_proposal.rs +++ b/dan_layer/storage/src/consensus_models/foreign_proposal.rs @@ -8,6 +8,7 @@ use std::{ str::FromStr, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use tari_dan_common_types::{Epoch, ShardGroup}; @@ -112,7 +113,7 @@ impl ForeignProposal { } } -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize, PartialOrd, Ord, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), diff --git a/dan_layer/storage/src/consensus_models/leader_fee.rs b/dan_layer/storage/src/consensus_models/leader_fee.rs index 34bbe2254..e38fdd041 100644 --- a/dan_layer/storage/src/consensus_models/leader_fee.rs +++ b/dan_layer/storage/src/consensus_models/leader_fee.rs @@ -3,9 +3,10 @@ use std::fmt::{Display, Formatter}; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize)] #[cfg_attr( feature = "ts", derive(ts_rs::TS), diff --git a/dan_layer/storage/src/consensus_models/no_vote.rs b/dan_layer/storage/src/consensus_models/no_vote.rs index f2598649b..adc81712b 100644 --- a/dan_layer/storage/src/consensus_models/no_vote.rs +++ b/dan_layer/storage/src/consensus_models/no_vote.rs @@ -54,16 +54,12 @@ pub enum NoVoteReason { CommandMerkleRootMismatch, #[error("Not all foreign input pledges are present")] NotAllForeignInputPledges, - #[error("Leader proposed to suspend a node that should not be suspended")] - ShouldNotSuspendNode, - #[error("Leader proposed to suspend a node but node is already suspended")] - NodeAlreadySuspended, - #[error("Leader proposed to resume a node but node is not suspended")] - NodeNotSuspended, - #[error("Leader proposed to suspend a node but it is not permitted to suspend more than f nodes")] - CannotSuspendNodeBelowQuorumThreshold, - #[error("Leader proposed to resume a node but the node should not be resumed")] - ShouldNodeResumeNode, + #[error("Leader proposed to EVICT a node that should not be evicted")] + ShouldNotEvictNode, + #[error("Leader proposed to EVICT a node but node is already evicted")] + NodeAlreadyEvicted, + #[error("Leader proposed to evict a node but it is not permitted to suspend more than f nodes")] + CannotEvictNodeBelowQuorumThreshold, } impl NoVoteReason { @@ -92,11 +88,9 @@ impl NoVoteReason { Self::StateMerkleRootMismatch => "StateMerkleRootMismatch", Self::CommandMerkleRootMismatch => "CommandMerkleRootMismatch", Self::NotAllForeignInputPledges => "NotAllForeignInputPledges", - Self::ShouldNotSuspendNode => "ShouldNotSuspendNode", - Self::NodeAlreadySuspended => "NodeAlreadySuspended", - Self::NodeNotSuspended => "NodeNotSuspended", - Self::ShouldNodeResumeNode => "ShouldNodeResumeNode", - Self::CannotSuspendNodeBelowQuorumThreshold => "CannotSuspendNodeBelowQuorumThreshold", + Self::NodeAlreadyEvicted => "NodeAlreadyEvicted", + Self::ShouldNotEvictNode => "ShouldNotEvictNode", + Self::CannotEvictNodeBelowQuorumThreshold => "CannotSuspendNodeBelowQuorumThreshold", } 
} } diff --git a/dan_layer/storage/src/consensus_models/quorum.rs b/dan_layer/storage/src/consensus_models/quorum.rs index 762210772..18797675f 100644 --- a/dan_layer/storage/src/consensus_models/quorum.rs +++ b/dan_layer/storage/src/consensus_models/quorum.rs @@ -3,11 +3,12 @@ use std::fmt::Display; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; #[cfg(feature = "ts")] use ts_rs::TS; -#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum QuorumDecision { Accept, diff --git a/dan_layer/storage/src/consensus_models/quorum_certificate.rs b/dan_layer/storage/src/consensus_models/quorum_certificate.rs index b564a37a3..f9e372f53 100644 --- a/dan_layer/storage/src/consensus_models/quorum_certificate.rs +++ b/dan_layer/storage/src/consensus_models/quorum_certificate.rs @@ -3,6 +3,7 @@ use std::{fmt::Display, ops::Deref}; +use borsh::BorshSerialize; use log::*; use serde::{Deserialize, Serialize}; use tari_common_types::types::{FixedHash, FixedHashSizeError}; @@ -18,6 +19,7 @@ use tari_dan_common_types::{ use crate::{ consensus_models::{ Block, + BlockHeader, BlockId, HighQc, LastVoted, @@ -44,6 +46,10 @@ pub struct QuorumCertificate { qc_id: QcId, #[cfg_attr(feature = "ts", ts(type = "string"))] block_id: BlockId, + #[cfg_attr(feature = "ts", ts(type = "string"))] + header_hash: FixedHash, + #[cfg_attr(feature = "ts", ts(type = "string"))] + parent_id: BlockId, block_height: NodeHeight, epoch: Epoch, shard_group: ShardGroup, @@ -57,7 +63,8 @@ pub struct QuorumCertificate { impl QuorumCertificate { pub fn new( - block: BlockId, + header_hash: FixedHash, + parent_id: BlockId, block_height: NodeHeight, epoch: Epoch, shard_group: ShardGroup, @@ -68,7 +75,9 @@ impl QuorumCertificate { leaf_hashes.sort(); let mut qc = Self { qc_id: QcId::zero(), - block_id: block, + block_id: BlockHeader::calculate_block_id(&header_hash, &parent_id), + header_hash, + parent_id, block_height, epoch, shard_group, @@ -82,27 +91,34 @@ impl QuorumCertificate { } pub fn genesis(epoch: Epoch, shard_group: ShardGroup) -> Self { - Self::new( - BlockId::zero(), - NodeHeight::zero(), + let mut qc = Self { + qc_id: QcId::zero(), + block_id: BlockHeader::calculate_block_id(&FixedHash::zero(), &BlockId::zero()), + header_hash: FixedHash::zero(), + parent_id: BlockId::zero(), + block_height: NodeHeight::zero(), epoch, shard_group, - vec![], - vec![], - QuorumDecision::Accept, - ) + signatures: vec![], + leaf_hashes: vec![], + decision: QuorumDecision::Accept, + is_shares_processed: false, + }; + qc.qc_id = qc.calculate_id(); + qc } pub fn calculate_id(&self) -> QcId { quorum_certificate_hasher() .chain(&self.epoch) .chain(&self.shard_group) - .chain(&self.block_id) + .chain(&self.header_hash) + .chain(&self.parent_id) .chain(&self.block_height) .chain(&self.signatures) .chain(&self.leaf_hashes) .chain(&self.decision) - .result() + .finalize_into_array() .into() } } @@ -144,6 +160,14 @@ impl QuorumCertificate { &self.block_id } + pub fn header_hash(&self) -> &FixedHash { + &self.header_hash + } + + pub fn parent_id(&self) -> &BlockId { + &self.parent_id + } + pub fn as_high_qc(&self) -> HighQc { HighQc { block_id: self.block_id, @@ -290,7 +314,7 @@ impl Display for QuorumCertificate { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, 
PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, BorshSerialize)] #[serde(transparent)] pub struct QcId(#[serde(with = "serde_with::hex")] FixedHash); @@ -329,6 +353,12 @@ impl From for QcId { } } +impl From<[u8; 32]> for QcId { + fn from(value: [u8; 32]) -> Self { + Self(value.into()) + } +} + impl TryFrom> for QcId { type Error = FixedHashSizeError; diff --git a/dan_layer/storage/src/consensus_models/state_tree_diff.rs b/dan_layer/storage/src/consensus_models/state_tree_diff.rs index 4c2834c08..47ebefd89 100644 --- a/dan_layer/storage/src/consensus_models/state_tree_diff.rs +++ b/dan_layer/storage/src/consensus_models/state_tree_diff.rs @@ -4,7 +4,7 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::{collections::HashMap, ops::Deref}; +use std::{collections::HashMap, fmt::Display, ops::Deref}; use indexmap::IndexMap; use tari_dan_common_types::shard::Shard; @@ -69,3 +69,15 @@ impl VersionedStateHashTreeDiff { Self { version, diff } } } + +impl Display for VersionedStateHashTreeDiff { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "VersionedStateHashTreeDiff(v{}, {} new node(s), {} stale node(s))", + self.version, + self.diff.new_nodes.len(), + self.diff.stale_tree_nodes.len() + ) + } +} diff --git a/dan_layer/storage/src/consensus_models/substate_change.rs b/dan_layer/storage/src/consensus_models/substate_change.rs index fee64b261..d284da216 100644 --- a/dan_layer/storage/src/consensus_models/substate_change.rs +++ b/dan_layer/storage/src/consensus_models/substate_change.rs @@ -1,6 +1,8 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use std::fmt::Display; + use tari_dan_common_types::{shard::Shard, SubstateAddress, ToSubstateAddress, VersionedSubstateId}; use tari_engine_types::substate::Substate; use tari_state_tree::SubstateTreeChange; @@ -115,6 +117,31 @@ impl From for SubstateChange { } } +impl Display for SubstateChange { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SubstateChange::Up { + id, + shard, + transaction_id, + substate, + } => write!( + f, + "Up: {}, {}, transaction_id: {}, substate hash: {}", + id, + shard, + transaction_id, + substate.to_value_hash() + ), + SubstateChange::Down { + id, + shard, + transaction_id, + } => write!(f, "Down: {}, {}, transaction_id: {}", id, shard, transaction_id), + } + } +} + impl From<&SubstateChange> for SubstateTreeChange { fn from(value: &SubstateChange) -> Self { match value { diff --git a/dan_layer/storage/src/consensus_models/substate_lock.rs b/dan_layer/storage/src/consensus_models/substate_lock.rs index 02142c7e0..edece4e45 100644 --- a/dan_layer/storage/src/consensus_models/substate_lock.rs +++ b/dan_layer/storage/src/consensus_models/substate_lock.rs @@ -60,8 +60,8 @@ impl fmt::Display for SubstateLock { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "LockedSubstate(transaction_id: {}, version: {}, lock_flag: {}, is_local_only: {})", - self.transaction_id, self.version, self.lock_type, self.is_local_only + "SubstateLock(version: {}, lock_flag: {}, is_local_only: {}, transaction_id: {})", + self.version, self.lock_type, self.is_local_only, self.transaction_id, ) } } diff --git a/dan_layer/storage/src/consensus_models/transaction_decision.rs b/dan_layer/storage/src/consensus_models/transaction_decision.rs index 19ed2e0c8..937eabe24 100644 --- a/dan_layer/storage/src/consensus_models/transaction_decision.rs +++ 
b/dan_layer/storage/src/consensus_models/transaction_decision.rs @@ -7,6 +7,7 @@ use std::{ str::FromStr, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use strum::ParseError; use strum_macros::{AsRefStr, EnumString}; @@ -22,7 +23,7 @@ pub enum FromStrConversionError { InvalidAbortReason(String, ParseError), } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum Decision { /// Decision to COMMIT the transaction @@ -31,7 +32,7 @@ pub enum Decision { Abort(AbortReason), } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, AsRefStr, EnumString)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, AsRefStr, EnumString, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub enum AbortReason { None, diff --git a/dan_layer/storage/src/consensus_models/transaction_execution.rs b/dan_layer/storage/src/consensus_models/transaction_execution.rs index 900981eae..adc80234d 100644 --- a/dan_layer/storage/src/consensus_models/transaction_execution.rs +++ b/dan_layer/storage/src/consensus_models/transaction_execution.rs @@ -1,7 +1,7 @@ // Copyright 2024 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::time::Duration; +use std::{fmt::Display, time::Duration}; use tari_engine_types::commit_result::{ExecuteResult, RejectReason}; use tari_transaction::TransactionId; @@ -188,3 +188,16 @@ impl BlockTransactionExecution { tx.transaction_executions_get(transaction_id, block_id) } } + +impl Display for BlockTransactionExecution { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "BlockTransactionExecution(block_id: {}, transaction_id: {}, decision: {}, execution_time: {:.2?})", + self.block_id, + self.execution.transaction_id, + self.decision(), + self.execution_time() + ) + } +} diff --git a/dan_layer/storage/src/consensus_models/validator_stats.rs b/dan_layer/storage/src/consensus_models/validator_stats.rs index 74a248cb0..13d6e4305 100644 --- a/dan_layer/storage/src/consensus_models/validator_stats.rs +++ b/dan_layer/storage/src/consensus_models/validator_stats.rs @@ -76,21 +76,16 @@ pub struct ValidatorConsensusStats { } impl ValidatorConsensusStats { - pub fn get_nodes_to_suspend( + pub fn get_nodes_to_evict( tx: &TTx, block_id: &BlockId, - suspend_threshold: u64, - limit: usize, + threshold: u64, + limit: u64, ) -> Result, StorageError> { - tx.validator_epoch_stats_get_nodes_to_suspend(block_id, suspend_threshold, limit) - } - - pub fn get_nodes_to_resume( - tx: &TTx, - block_id: &BlockId, - limit: usize, - ) -> Result, StorageError> { - tx.validator_epoch_stats_get_nodes_to_resume(block_id, limit) + if limit == 0 { + return Ok(Vec::new()); + } + tx.validator_epoch_stats_get_nodes_to_evict(block_id, threshold, limit) } pub fn get_by_public_key( @@ -101,31 +96,26 @@ impl ValidatorConsensusStats { tx.validator_epoch_stats_get(epoch, public_key) } - pub fn is_node_suspended( + pub fn is_node_evicted( tx: &TTx, block_id: &BlockId, public_key: &PublicKey, ) -> Result { - tx.suspended_nodes_is_suspended(block_id, public_key) + tx.suspended_nodes_is_evicted(block_id, public_key) } - pub fn suspend_node( + pub fn evict_node( tx: &mut TTx, public_key: &PublicKey, - suspended_in_block: BlockId, + evicted_in_block: BlockId, ) 
-> Result<(), StorageError> { - tx.suspended_nodes_insert(public_key, suspended_in_block) + tx.evicted_nodes_evict(public_key, evicted_in_block) } - pub fn resume_node( - tx: &mut TTx, - public_key: &PublicKey, - resumed_in_block: BlockId, - ) -> Result<(), StorageError> { - tx.suspended_nodes_mark_for_removal(public_key, resumed_in_block) - } - - pub fn count_number_suspended_nodes(tx: &TTx) -> Result { - tx.suspended_nodes_count() + pub fn count_number_evicted_nodes( + tx: &TTx, + epoch: Epoch, + ) -> Result { + tx.evicted_nodes_count(epoch) } } diff --git a/dan_layer/storage/src/consensus_models/vote.rs b/dan_layer/storage/src/consensus_models/vote.rs index beca58068..ed3db5c84 100644 --- a/dan_layer/storage/src/consensus_models/vote.rs +++ b/dan_layer/storage/src/consensus_models/vote.rs @@ -1,11 +1,15 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use std::ops::Deref; +use std::{ + hash::{DefaultHasher, Hasher}, + ops::Deref, +}; use serde::{Deserialize, Serialize}; use tari_common_types::types::FixedHash; -use tari_dan_common_types::{hashing::vote_hasher, optional::Optional, Epoch}; +use tari_crypto::tari_utilities::ByteArray; +use tari_dan_common_types::{optional::Optional, Epoch}; use crate::{ consensus_models::{BlockId, QuorumDecision, ValidatorSignature}, @@ -24,8 +28,17 @@ pub struct Vote { } impl Vote { - pub fn calculate_hash(&self) -> FixedHash { - vote_hasher().chain(self).result() + /// Returns a SIPHASH hash used to uniquely identify this vote + pub fn get_hash(&self) -> u64 { + let mut hasher = DefaultHasher::new(); + hasher.write_u64(self.epoch.as_u64()); + hasher.write(self.block_id.as_bytes()); + hasher.write_u8(self.decision.as_u8()); + hasher.write(self.sender_leaf_hash.as_slice()); + hasher.write(self.signature.public_key.as_bytes()); + hasher.write(self.signature.signature.get_public_nonce().as_bytes()); + hasher.write(self.signature.signature.get_signature().as_bytes()); + hasher.finish() } pub fn signature(&self) -> &ValidatorSignature { diff --git a/dan_layer/storage/src/consensus_models/vote_signature.rs b/dan_layer/storage/src/consensus_models/vote_signature.rs index a515f163c..d4948fe08 100644 --- a/dan_layer/storage/src/consensus_models/vote_signature.rs +++ b/dan_layer/storage/src/consensus_models/vote_signature.rs @@ -1,17 +1,18 @@ // Copyright 2023 The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use borsh::BorshSerialize; use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use tari_common_types::types::{PrivateKey, PublicKey}; -use tari_core::transactions::transaction_components::ValidatorNodeHashDomain; use tari_crypto::{keys::PublicKey as _, signatures::SchnorrSignature}; +use tari_hashing::ValidatorNodeHashDomain; #[cfg(feature = "ts")] use ts_rs::TS; pub type ValidatorSchnorrSignature = SchnorrSignature; -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, BorshSerialize)] #[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] pub struct ValidatorSignature { #[cfg_attr(feature = "ts", ts(type = "string"))] diff --git a/dan_layer/storage/src/global/backend_adapter.rs b/dan_layer/storage/src/global/backend_adapter.rs index f01a40e6f..8f4367b55 100644 --- a/dan_layer/storage/src/global/backend_adapter.rs +++ b/dan_layer/storage/src/global/backend_adapter.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF 
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashMap, ops::RangeInclusive}; +use std::collections::HashMap; use serde::{de::DeserializeOwned, Serialize}; use tari_common_types::types::{FixedHash, PublicKey}; @@ -33,10 +33,11 @@ use tari_dan_common_types::{ SubstateAddress, }; -use super::{base_layer_hashes_db::DbBaseLayerBlockInfo, DbEpoch}; +use super::{DbBaseLayerBlockInfo, DbEpoch}; use crate::{ atomic::AtomicDb, global::{ + base_layer_db::DbLayer1Transaction, metadata_db::MetadataKey, models::ValidatorNode, template_db::{DbTemplate, DbTemplateUpdate}, @@ -82,24 +83,27 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + Clone { address: Self::Addr, public_key: PublicKey, shard_key: SubstateAddress, - registered_at_base_height: u64, start_epoch: Epoch, fee_claim_public_key: PublicKey, - sidechain_id: Option, ) -> Result<(), Self::Error>; - fn remove_validator_node( + fn deactivate_validator_node( &self, tx: &mut Self::DbTransaction<'_>, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), Self::Error>; - fn get_validator_nodes_within_epoch( + fn get_validator_nodes_within_start_epoch( + &self, + tx: &mut Self::DbTransaction<'_>, + epoch: Epoch, + ) -> Result>, Self::Error>; + + fn get_validator_nodes_within_committee_epoch( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, Self::Error>; fn get_validator_node_by_address( @@ -107,7 +111,6 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + Clone { tx: &mut Self::DbTransaction<'_>, epoch: Epoch, address: &Self::Addr, - sidechain_id: Option<&PublicKey>, ) -> Result, Self::Error>; fn get_validator_node_by_public_key( @@ -115,25 +118,12 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + Clone { tx: &mut Self::DbTransaction<'_>, epoch: Epoch, public_key: &PublicKey, - sidechain_id: Option<&PublicKey>, ) -> Result, Self::Error>; - fn validator_nodes_count( - &self, - tx: &mut Self::DbTransaction<'_>, - epoch: Epoch, - sidechain_id: Option<&PublicKey>, - ) -> Result; - fn validator_nodes_count_by_start_epoch( - &self, - tx: &mut Self::DbTransaction<'_>, - epoch: Epoch, - sidechain_id: Option<&PublicKey>, - ) -> Result; + fn validator_nodes_count(&self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch) -> Result; fn validator_nodes_count_for_shard_group( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, shard_group: ShardGroup, ) -> Result; @@ -142,19 +132,17 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + Clone { tx: &mut Self::DbTransaction<'_>, shard_key: SubstateAddress, shard_group: ShardGroup, - sidechain_id: Option<&PublicKey>, epoch: Epoch, ) -> Result<(), Self::Error>; - fn validator_nodes_get_by_substate_range( + fn validator_nodes_get_for_shard_group( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, - substate_range: RangeInclusive, - ) -> Result>, Self::Error>; + shard_group: ShardGroup, + ) -> Result, Self::Error>; - fn validator_nodes_get_for_shard_group( + fn validator_nodes_get_overlapping_shard_group( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, @@ -165,7 +153,6 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + Clone { &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, Self::Error>; fn insert_epoch(&self, tx: &mut Self::DbTransaction<'_>, epoch: DbEpoch) -> Result<(), Self::Error>; @@ -193,4 +180,10 @@ pub trait GlobalDbAdapter: AtomicDb + Send + Sync + 
Clone { tx: &mut Self::DbTransaction<'_>, epoch: Epoch, ) -> Result, Self::Error>; + + fn insert_layer_one_transaction( + &self, + tx: &mut Self::DbTransaction<'_>, + data: DbLayer1Transaction, + ) -> Result<(), Self::Error>; } diff --git a/dan_layer/storage/src/global/base_layer_hashes_db.rs b/dan_layer/storage/src/global/base_layer_db.rs similarity index 68% rename from dan_layer/storage/src/global/base_layer_hashes_db.rs rename to dan_layer/storage/src/global/base_layer_db.rs index 82728eff2..3c3cb5ec3 100644 --- a/dan_layer/storage/src/global/base_layer_hashes_db.rs +++ b/dan_layer/storage/src/global/base_layer_db.rs @@ -20,16 +20,20 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::fmt::Display; + use tari_common_types::types::FixedHash; +use tari_dan_common_types::Epoch; +use tari_sidechain::EvictionProof; use crate::global::GlobalDbAdapter; -pub struct BaseLayerHashesDb<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> { +pub struct BaseLayerDb<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> { backend: &'a TGlobalDbAdapter, tx: &'tx mut TGlobalDbAdapter::DbTransaction<'a>, } -impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> BaseLayerHashesDb<'a, 'tx, TGlobalDbAdapter> { +impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> BaseLayerDb<'a, 'tx, TGlobalDbAdapter> { pub fn new(backend: &'a TGlobalDbAdapter, tx: &'tx mut TGlobalDbAdapter::DbTransaction<'a>) -> Self { Self { backend, tx } } @@ -48,6 +52,16 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> BaseLayerHashesDb<'a, 'tx, TGlo .get_base_layer_block_info(self.tx, hash) .map_err(TGlobalDbAdapter::Error::into) } + + pub fn insert_eviction_proof(&mut self, proof: &EvictionProof) -> Result<(), TGlobalDbAdapter::Error> { + self.backend + .insert_layer_one_transaction(self.tx, DbLayer1Transaction { + epoch: Epoch(proof.epoch().as_u64()), + proof_type: DbLayerOnePayloadType::EvictionProof, + payload: proof, + }) + .map_err(TGlobalDbAdapter::Error::into) + } } #[derive(Debug, Clone)] @@ -55,3 +69,23 @@ pub struct DbBaseLayerBlockInfo { pub hash: FixedHash, pub height: u64, } + +#[derive(Debug, Clone)] +pub struct DbLayer1Transaction { + pub epoch: Epoch, + pub proof_type: DbLayerOnePayloadType, + pub payload: T, +} + +#[derive(Debug, Clone)] +pub enum DbLayerOnePayloadType { + EvictionProof, +} + +impl Display for DbLayerOnePayloadType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + DbLayerOnePayloadType::EvictionProof => write!(f, "EvictionProof"), + } + } +} diff --git a/dan_layer/storage/src/global/global_db.rs b/dan_layer/storage/src/global/global_db.rs index e19eb74a6..7ca68c30b 100644 --- a/dan_layer/storage/src/global/global_db.rs +++ b/dan_layer/storage/src/global/global_db.rs @@ -22,7 +22,7 @@ use std::sync::Arc; -use super::{validator_node_db::ValidatorNodeDb, BaseLayerHashesDb, BmtDb, EpochDb}; +use super::{validator_node_db::ValidatorNodeDb, BaseLayerDb, BmtDb, EpochDb}; use crate::{ global::{backend_adapter::GlobalDbAdapter, metadata_db::MetadataDb, template_db::TemplateDb}, StorageError, @@ -87,11 +87,11 @@ impl GlobalDb { EpochDb::new(&self.adapter, tx) } - pub fn base_layer_hashes<'a, 'tx>( + pub fn base_layer<'a, 'tx>( &'a self, tx: &'tx mut TGlobalDbAdapter::DbTransaction<'a>, - ) -> BaseLayerHashesDb<'a, 'tx, TGlobalDbAdapter> { - BaseLayerHashesDb::new(&self.adapter, tx) + ) -> BaseLayerDb<'a, 'tx, TGlobalDbAdapter> { + 
BaseLayerDb::new(&self.adapter, tx) } pub fn bmt<'a, 'tx>( diff --git a/dan_layer/storage/src/global/mod.rs b/dan_layer/storage/src/global/mod.rs index c242c9677..d00952398 100644 --- a/dan_layer/storage/src/global/mod.rs +++ b/dan_layer/storage/src/global/mod.rs @@ -37,8 +37,8 @@ pub use validator_node_db::ValidatorNodeDb; mod epoch_db; pub use epoch_db::{DbEpoch, EpochDb}; -mod base_layer_hashes_db; -pub use base_layer_hashes_db::{BaseLayerHashesDb, DbBaseLayerBlockInfo}; +mod base_layer_db; +pub use base_layer_db::*; mod bmt_db; pub use bmt_db::{BmtDb, DbBmt}; diff --git a/dan_layer/storage/src/global/models/validator_node.rs b/dan_layer/storage/src/global/models/validator_node.rs index 59bf9036a..33379ca3f 100644 --- a/dan_layer/storage/src/global/models/validator_node.rs +++ b/dan_layer/storage/src/global/models/validator_node.rs @@ -11,10 +11,9 @@ pub struct ValidatorNode { pub address: TAddr, pub public_key: PublicKey, pub shard_key: SubstateAddress, - pub registered_at_base_height: u64, pub start_epoch: Epoch, + pub end_epoch: Option, pub fee_claim_public_key: PublicKey, - pub sidechain_id: Option, } impl ValidatorNode { diff --git a/dan_layer/storage/src/global/validator_node_db.rs b/dan_layer/storage/src/global/validator_node_db.rs index 74aa616a1..d5b817e6a 100644 --- a/dan_layer/storage/src/global/validator_node_db.rs +++ b/dan_layer/storage/src/global/validator_node_db.rs @@ -42,10 +42,8 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> ValidatorNodeDb<'a, 'tx, TGloba peer_address: TGlobalDbAdapter::Addr, public_key: PublicKey, shard_key: SubstateAddress, - registered_at_base_height: u64, start_epoch: Epoch, fee_claim_public_key: PublicKey, - sidechain_id: Option, ) -> Result<(), TGlobalDbAdapter::Error> { self.backend .insert_validator_node( @@ -53,38 +51,35 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> ValidatorNodeDb<'a, 'tx, TGloba peer_address, public_key, shard_key, - registered_at_base_height, start_epoch, fee_claim_public_key, - sidechain_id, ) .map_err(TGlobalDbAdapter::Error::into) } - pub fn remove( + pub fn deactivate( &mut self, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), TGlobalDbAdapter::Error> { self.backend - .remove_validator_node(self.tx, public_key, sidechain_id) + .deactivate_validator_node(self.tx, public_key, deactivation_epoch) .map_err(TGlobalDbAdapter::Error::into) } - pub fn count(&mut self, epoch: Epoch, sidechain_id: Option<&PublicKey>) -> Result { + pub fn count(&mut self, epoch: Epoch) -> Result { self.backend - .validator_nodes_count(self.tx, epoch, sidechain_id) + .validator_nodes_count(self.tx, epoch) .map_err(TGlobalDbAdapter::Error::into) } pub fn count_in_shard_group( &mut self, epoch: Epoch, - sidechain_id: Option<&PublicKey>, shard_group: ShardGroup, ) -> Result { self.backend - .validator_nodes_count_for_shard_group(self.tx, epoch, sidechain_id, shard_group) + .validator_nodes_count_for_shard_group(self.tx, epoch, shard_group) .map_err(TGlobalDbAdapter::Error::into) } @@ -92,10 +87,9 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> ValidatorNodeDb<'a, 'tx, TGloba &mut self, epoch: Epoch, public_key: &PublicKey, - sidechain_id: Option<&PublicKey>, ) -> Result, TGlobalDbAdapter::Error> { self.backend - .get_validator_node_by_public_key(self.tx, epoch, public_key, sidechain_id) + .get_validator_node_by_public_key(self.tx, epoch, public_key) .map_err(TGlobalDbAdapter::Error::into) } @@ -103,40 +97,61 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> ValidatorNodeDb<'a, 'tx, TGloba 
&mut self, epoch: Epoch, address: &TGlobalDbAdapter::Addr, - sidechain_id: Option<&PublicKey>, ) -> Result, TGlobalDbAdapter::Error> { self.backend - .get_validator_node_by_address(self.tx, epoch, address, sidechain_id) + .get_validator_node_by_address(self.tx, epoch, address) .map_err(TGlobalDbAdapter::Error::into) } + /// Returns all registered validator nodes from the given epoch + /// + /// This may be used to fetch validators registered for a future epoch, however since the epoch is not finalized + /// yet, the list may not be complete. + pub fn get_all_registered_within_start_epoch( + &mut self, + epoch: Epoch, + ) -> Result>, TGlobalDbAdapter::Error> { + self.backend + .get_validator_nodes_within_start_epoch(self.tx, epoch) + .map_err(TGlobalDbAdapter::Error::into) + } + + /// Fetches all validator nodes that are active for a given epoch pub fn get_all_within_epoch( &mut self, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, TGlobalDbAdapter::Error> { self.backend - .get_validator_nodes_within_epoch(self.tx, epoch, sidechain_id) + .get_validator_nodes_within_committee_epoch(self.tx, epoch) .map_err(TGlobalDbAdapter::Error::into) } - pub fn get_committees_for_shard_group( + pub fn get_committee_for_shard_group( &mut self, epoch: Epoch, shard_group: ShardGroup, - ) -> Result>, TGlobalDbAdapter::Error> { + ) -> Result, TGlobalDbAdapter::Error> { self.backend .validator_nodes_get_for_shard_group(self.tx, epoch, shard_group) .map_err(TGlobalDbAdapter::Error::into) } + pub fn get_committees_overlapping_shard_group( + &mut self, + epoch: Epoch, + shard_group: ShardGroup, + ) -> Result>, TGlobalDbAdapter::Error> { + self.backend + .validator_nodes_get_overlapping_shard_group(self.tx, epoch, shard_group) + .map_err(TGlobalDbAdapter::Error::into) + } + pub fn get_committees( &mut self, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, TGlobalDbAdapter::Error> { self.backend - .validator_nodes_get_committees_for_epoch(self.tx, epoch, sidechain_id) + .validator_nodes_get_committees_for_epoch(self.tx, epoch) .map_err(TGlobalDbAdapter::Error::into) } @@ -144,11 +159,10 @@ impl<'a, 'tx, TGlobalDbAdapter: GlobalDbAdapter> ValidatorNodeDb<'a, 'tx, TGloba &mut self, substate_address: SubstateAddress, shard_group: ShardGroup, - sidechain_id: Option<&PublicKey>, epoch: Epoch, ) -> Result<(), TGlobalDbAdapter::Error> { self.backend - .validator_nodes_set_committee_shard(self.tx, substate_address, shard_group, sidechain_id, epoch) + .validator_nodes_set_committee_shard(self.tx, substate_address, shard_group, epoch) .map_err(TGlobalDbAdapter::Error::into) } } diff --git a/dan_layer/storage/src/state_store/mod.rs b/dan_layer/storage/src/state_store/mod.rs index b10ca688e..b41cd3d4c 100644 --- a/dan_layer/storage/src/state_store/mod.rs +++ b/dan_layer/storage/src/state_store/mod.rs @@ -24,6 +24,7 @@ use tari_dan_common_types::{ }; use tari_engine_types::substate::SubstateId; use tari_state_tree::{Node, NodeKey, StaleTreeNode, Version}; +use tari_template_lib::models::UnclaimedConfidentialOutputAddress; use tari_transaction::TransactionId; #[cfg(feature = "ts")] use ts_rs::TS; @@ -341,7 +342,7 @@ pub trait StateStoreReadTransaction: Sized { ) -> Result; // -------------------------------- BurntUtxos -------------------------------- // - fn burnt_utxos_get(&self, substate_id: &SubstateId) -> Result; + fn burnt_utxos_get(&self, commitment: &UnclaimedConfidentialOutputAddress) -> Result; fn burnt_utxos_get_all_unproposed( &self, leaf_block: &BlockId, @@ -359,20 +360,16 @@ pub trait 
StateStoreReadTransaction: Sized { epoch: Epoch, public_key: &PublicKey, ) -> Result; - fn validator_epoch_stats_get_nodes_to_suspend( - &self, - block_id: &BlockId, - min_missed_proposals: u64, - limit: usize, - ) -> Result, StorageError>; - fn validator_epoch_stats_get_nodes_to_resume( + + fn validator_epoch_stats_get_nodes_to_evict( &self, block_id: &BlockId, - limit: usize, + threshold: u64, + limit: u64, ) -> Result, StorageError>; // -------------------------------- SuspendedNodes -------------------------------- // - fn suspended_nodes_is_suspended(&self, block_id: &BlockId, public_key: &PublicKey) -> Result; - fn suspended_nodes_count(&self) -> Result; + fn suspended_nodes_is_evicted(&self, block_id: &BlockId, public_key: &PublicKey) -> Result; + fn evicted_nodes_count(&self, epoch: Epoch) -> Result; } pub trait StateStoreWriteTransaction { @@ -585,11 +582,11 @@ pub trait StateStoreWriteTransaction { fn burnt_utxos_insert(&mut self, burnt_utxo: &BurntUtxo) -> Result<(), StorageError>; fn burnt_utxos_set_proposed_block( &mut self, - substate_id: &SubstateId, + commitment: &UnclaimedConfidentialOutputAddress, proposed_in_block: &BlockId, ) -> Result<(), StorageError>; fn burnt_utxos_clear_proposed_block(&mut self, proposed_in_block: &BlockId) -> Result<(), StorageError>; - fn burnt_utxos_delete(&mut self, substate_id: &SubstateId) -> Result<(), StorageError>; + fn burnt_utxos_delete(&mut self, commitment: &UnclaimedConfidentialOutputAddress) -> Result<(), StorageError>; // -------------------------------- Lock conflicts -------------------------------- // fn lock_conflicts_insert_all<'a, I: IntoIterator)>>( @@ -607,17 +604,13 @@ pub trait StateStoreWriteTransaction { ) -> Result<(), StorageError>; // -------------------------------- SuspendedNodes -------------------------------- // - fn suspended_nodes_insert( - &mut self, - public_key: &PublicKey, - suspended_in_block: BlockId, - ) -> Result<(), StorageError>; - fn suspended_nodes_mark_for_removal( + + fn evicted_nodes_evict(&mut self, public_key: &PublicKey, evicted_in_block: BlockId) -> Result<(), StorageError>; + fn evicted_nodes_mark_eviction_as_committed( &mut self, public_key: &PublicKey, - resumed_in_block: BlockId, + epoch: Epoch, ) -> Result<(), StorageError>; - fn suspended_nodes_delete(&mut self, public_key: &PublicKey) -> Result<(), StorageError>; // -------------------------------- Diagnotics -------------------------------- // fn diagnostics_add_no_vote(&mut self, block_id: BlockId, reason: NoVoteReason) -> Result<(), StorageError>; diff --git a/dan_layer/storage_sqlite/Cargo.toml b/dan_layer/storage_sqlite/Cargo.toml index 12b56a9f6..28adb3406 100644 --- a/dan_layer/storage_sqlite/Cargo.toml +++ b/dan_layer/storage_sqlite/Cargo.toml @@ -14,6 +14,7 @@ tari_dan_storage = { workspace = true } diesel = { workspace = true, default-features = false, features = ["sqlite", "chrono"] } diesel_migrations = { workspace = true } +log = { workspace = true } thiserror = { workspace = true } chrono = { workspace = true } serde_json = { workspace = true } diff --git a/dan_layer/storage_sqlite/migrations/2022-06-20-091532_create_metadata/up.sql b/dan_layer/storage_sqlite/migrations/2022-06-20-091532_create_metadata/up.sql deleted file mode 100644 index 809a65cfb..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-06-20-091532_create_metadata/up.sql +++ /dev/null @@ -1,27 +0,0 @@ --- // Copyright 2021. 
The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -create table metadata -( - key_name blob primary key not null, - value blob not null -) diff --git a/dan_layer/storage_sqlite/migrations/2022-06-20-091532_create_metadata/down.sql b/dan_layer/storage_sqlite/migrations/2022-06-20-091532_initial/down.sql similarity index 100% rename from dan_layer/storage_sqlite/migrations/2022-06-20-091532_create_metadata/down.sql rename to dan_layer/storage_sqlite/migrations/2022-06-20-091532_initial/down.sql diff --git a/dan_layer/storage_sqlite/migrations/2022-06-20-091532_initial/up.sql b/dan_layer/storage_sqlite/migrations/2022-06-20-091532_initial/up.sql new file mode 100644 index 000000000..4c3cb271f --- /dev/null +++ b/dan_layer/storage_sqlite/migrations/2022-06-20-091532_initial/up.sql @@ -0,0 +1,109 @@ +-- // Copyright 2021. The Tari Project +-- // +-- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +-- // following conditions are met: +-- // +-- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +-- // disclaimer. +-- // +-- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +-- // following disclaimer in the documentation and/or other materials provided with the distribution. +-- // +-- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +-- // products derived from this software without specific prior written permission. +-- // +-- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +-- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +-- // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +-- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +-- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +-- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +-- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +create table metadata +( + key_name blob primary key not null, + value blob not null +); + +create table validator_nodes +( + id integer primary key autoincrement not null, + public_key blob not null, + address text not null, + shard_key blob not null, + start_epoch bigint not null, + end_epoch bigint null, + fee_claim_public_key blob not null +); + +CREATE TABLE committees +( + id INTEGER PRIMARY KEY autoincrement NOT NULL, + validator_node_id INTEGER NOT NULL, + epoch BIGINT NOT NULL, + shard_start INTEGER NOT NULL, + shard_end INTEGER NOT NULL, + FOREIGN KEY (validator_node_id) REFERENCES validator_nodes (id) ON DELETE CASCADE +); + +CREATE INDEX committees_validator_node_id_epoch_index ON committees (validator_node_id, epoch); + + +create table templates +( + id Integer primary key autoincrement not null, + -- template name + template_name text not null, + expected_hash blob not null, + -- the address is the hash of the content + template_address blob not null, + -- where to find the template code + url text not null, + -- the block height in which the template was published + height bigint not null, + -- The type of template, used to create an enum in code + template_type text not null, + + -- compiled template code as a WASM binary + compiled_code blob null, + -- flow json + flow_json text null, + status VARCHAR(20) NOT NULL DEFAULT 'New', + wasm_path VARCHAR(255) NULL, + manifest text null, + added_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +-- fetching by the template_address will be a very common operation +create unique index templates_template_address_index on templates (template_address); + + +create table epochs +( + epoch bigint primary key not null, + validator_node_mr blob not null +); + +create table bmt_cache +( + epoch bigint primary key not null, + bmt blob not null +); + +create table base_layer_block_info +( + hash blob primary key not null, + height bigint not null +); + +CREATE TABLE layer_one_transactions +( + id INTEGER PRIMARY KEY autoincrement NOT NULL, + epoch BIGINT NOT NULL, + payload_type TEXT NOT NULL, + payload TEXT NOT NULL, + submitted_at DATETIME NULL, + is_observed BOOLEAN NOT NULL DEFAULT '0' +); + diff --git a/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/down.sql b/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/down.sql deleted file mode 100644 index 8e921fe47..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/down.sql +++ /dev/null @@ -1,21 +0,0 @@ --- // Copyright 2022. The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/up.sql b/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/up.sql deleted file mode 100644 index 0cb895abb..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-09-30-212244_create_vns_and_committees/up.sql +++ /dev/null @@ -1,31 +0,0 @@ --- // Copyright 2022. The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
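Stepping back to the StateStore trait changes earlier in this patch: the suspend/resume bookkeeping is replaced by an evict/commit pair. Below is a hedged sketch of the resulting call flow written against the amended trait methods; the import paths for BlockId and StorageError are assumptions from context, the count is assumed to be a u64, and in practice the two write calls happen at different points in the block lifecycle.

// Sketch only: the evict -> mark-committed -> count flow implied by the
// StateStore trait changes. Import paths are assumed; the calls are shown
// together purely to illustrate the API.
use tari_common_types::types::PublicKey;
use tari_dan_common_types::Epoch;
use tari_dan_storage::{
    consensus_models::BlockId,
    StateStoreReadTransaction,
    StateStoreWriteTransaction,
    StorageError,
};

fn record_eviction<TTx: StateStoreWriteTransaction>(
    tx: &mut TTx,
    public_key: &PublicKey,
    evicted_in_block: BlockId,
    epoch: Epoch,
) -> Result<(), StorageError> {
    // Record the eviction proposed in `evicted_in_block`...
    tx.evicted_nodes_evict(public_key, evicted_in_block)?;
    // ...and, once the layer-one eviction proof has been observed, mark it as
    // committed for the epoch.
    tx.evicted_nodes_mark_eviction_as_committed(public_key, epoch)
}

fn evictions_committed_in_epoch<TTx: StateStoreReadTransaction>(
    tx: &TTx,
    epoch: Epoch,
) -> Result<u64, StorageError> {
    // Assumed to return the number of evictions recorded for the epoch.
    tx.evicted_nodes_count(epoch)
}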
- -create table validator_nodes -( - id integer primary key autoincrement not null, - public_key blob not null, - shard_key blob not null, - registered_at_base_height bigint not null, - start_epoch bigint not null -); - diff --git a/dan_layer/storage_sqlite/migrations/2022-10-06-091532_create_templates/down.sql b/dan_layer/storage_sqlite/migrations/2022-10-06-091532_create_templates/down.sql deleted file mode 100644 index e69de29bb..000000000 diff --git a/dan_layer/storage_sqlite/migrations/2022-10-06-091532_create_templates/up.sql b/dan_layer/storage_sqlite/migrations/2022-10-06-091532_create_templates/up.sql deleted file mode 100644 index 4477b9e4c..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-10-06-091532_create_templates/up.sql +++ /dev/null @@ -1,39 +0,0 @@ --- // Copyright 2021. The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
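The legacy validator_nodes migration deleted above keyed registrations on registered_at_base_height; the consolidated initial migration earlier in this patch replaces that with start_epoch and a nullable end_epoch (populated when a validator is deactivated). The activity rule encoded by the new adapter queries, start_epoch <= ? AND (end_epoch IS NULL OR end_epoch > ?), written out as a plain predicate for reference:

// Illustrative only: a validator is active at `epoch` when it registered at or
// before that epoch and has either not been deactivated (end_epoch is NULL) or
// is deactivated only at a later epoch.
fn validator_is_active_at(start_epoch: u64, end_epoch: Option<u64>, epoch: u64) -> bool {
    start_epoch <= epoch && end_epoch.map_or(true, |end| end > epoch)
}

#[cfg(test)]
mod activity_tests {
    use super::*;

    #[test]
    fn activity_window() {
        // Registered at epoch 2, deactivated at epoch 5: active for epochs 2..=4.
        assert!(validator_is_active_at(2, Some(5), 4));
        assert!(!validator_is_active_at(2, Some(5), 5));
        // Never deactivated: active for any epoch at or after registration.
        assert!(validator_is_active_at(2, None, 100));
        assert!(!validator_is_active_at(2, None, 1));
    }
}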
- -create table templates -( - id Integer primary key autoincrement not null, - -- the address is the hash of the content - template_address blob not null, - -- where to find the template code - url text not null, - -- the block height in which the template was published - height bigint not null, - -- compiled template code as a WASM binary - compiled_code blob not null, - -- template name - template_name text not null -); - --- fetching by the template_address will be a very common operation -create unique index templates_template_address_index on templates (template_address); diff --git a/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/down.sql b/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/down.sql deleted file mode 100644 index 291a97c5c..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/down.sql +++ /dev/null @@ -1 +0,0 @@ --- This file should undo anything in `up.sql` \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/up.sql b/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/up.sql deleted file mode 100644 index 416ac07de..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-10-11-121711_add_status_to_templates/up.sql +++ /dev/null @@ -1,9 +0,0 @@ -ALTER TABLE templates - ADD COLUMN status VARCHAR(20) NOT NULL DEFAULT 'New'; - -ALTER TABLE templates - ADD COLUMN wasm_path VARCHAR(255) NULL; - -ALTER TABLE templates - ADD COLUMN added_at INT(11) NOT NULL DEFAULT 0; - diff --git a/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/down.sql b/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/down.sql deleted file mode 100644 index 8e921fe47..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/down.sql +++ /dev/null @@ -1,21 +0,0 @@ --- // Copyright 2022. The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/up.sql b/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/up.sql deleted file mode 100644 index e47c72717..000000000 --- a/dan_layer/storage_sqlite/migrations/2022-11-14-194943_create_epoch_metadata/up.sql +++ /dev/null @@ -1,27 +0,0 @@ --- // Copyright 2022. The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -create table epochs -( - epoch bigint primary key not null, - validator_node_mr blob not null -); diff --git a/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/down.sql b/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/down.sql deleted file mode 100644 index d7fd80a1b..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/down.sql +++ /dev/null @@ -1,2 +0,0 @@ --- // Copyright 2022 The Tari Project --- // SPDX-License-Identifier: BSD-3-Clause diff --git a/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/up.sql b/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/up.sql deleted file mode 100644 index d962d8704..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-03-10-121212_add_flow_data/up.sql +++ /dev/null @@ -1,33 +0,0 @@ --- // Copyright 2022 The Tari Project --- // SPDX-License-Identifier: BSD-3-Clause - - -drop table templates; - -create table templates -( - id Integer primary key autoincrement not null, - -- template name - template_name text not null, - expected_hash blob not null, - -- the address is the hash of the content - template_address blob not null, - -- where to find the template code - url text not null, - -- the block height in which the template was published - height bigint not null, - -- The type of template, used to create an enum in code - template_type text not null, - - -- compiled template code as a WASM binary - compiled_code blob null, - -- flow json - flow_json text null, - status VARCHAR(20) NOT NULL DEFAULT 'New', - wasm_path VARCHAR(255) NULL, - manifest text null, - added_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP -); - --- fetching by the template_address will be a very common operation -create unique index templates_template_address_index on templates (template_address); diff --git a/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/down.sql b/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/down.sql deleted file mode 100644 index 291a97c5c..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/down.sql +++ /dev/null @@ -1 +0,0 @@ --- This file should undo anything in `up.sql` \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/up.sql b/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/up.sql deleted file mode 100644 index 1da3f538c..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-08-08-123101_add_fee_claim_public_key_to_validators/up.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE validator_nodes - ADD COLUMN fee_claim_public_key BLOB NOT NULL DEFAULT 'invalid'; diff --git a/dan_layer/storage_sqlite/migrations/2023-09-28-152433_add_bmt_cache/down.sql b/dan_layer/storage_sqlite/migrations/2023-09-28-152433_add_bmt_cache/down.sql deleted file mode 100644 index e69de29bb..000000000 diff --git a/dan_layer/storage_sqlite/migrations/2023-09-28-152433_add_bmt_cache/up.sql b/dan_layer/storage_sqlite/migrations/2023-09-28-152433_add_bmt_cache/up.sql deleted file mode 100644 index ac7d66181..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-09-28-152433_add_bmt_cache/up.sql +++ /dev/null @@ -1,27 +0,0 @@ --- // Copyright 2022. 
The Tari Project --- // --- // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the --- // following conditions are met: --- // --- // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following --- // disclaimer. --- // --- // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the --- // following disclaimer in the documentation and/or other materials provided with the distribution. --- // --- // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote --- // products derived from this software without specific prior written permission. --- // --- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, --- // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE --- // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, --- // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR --- // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, --- // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --- // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -create table bmt_cache -( - epoch bigint primary key not null, - bmt blob not null -); diff --git a/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/down.sql b/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/down.sql deleted file mode 100644 index 291a97c5c..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/down.sql +++ /dev/null @@ -1 +0,0 @@ --- This file should undo anything in `up.sql` \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/up.sql b/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/up.sql deleted file mode 100644 index 7e5beb66d..000000000 --- a/dan_layer/storage_sqlite/migrations/2023-11-28-050541_add_address_to_validator_nodes/up.sql +++ /dev/null @@ -1,3 +0,0 @@ --- Your SQL goes here -ALTER TABLE validator_nodes - ADD COLUMN address TEXT NOT NULL DEFAULT 'invalid'; diff --git a/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/down.sql b/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/down.sql deleted file mode 100644 index 6186a0223..000000000 --- a/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE base_layer_block_info; diff --git a/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/up.sql b/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/up.sql deleted file mode 100644 index a1cd68f7a..000000000 --- a/dan_layer/storage_sqlite/migrations/2024-03-11-135158_base_layer_block_info/up.sql +++ /dev/null @@ -1,5 +0,0 @@ -create table base_layer_block_info -( - hash blob primary key not null, - height bigint not null -); diff --git a/dan_layer/storage_sqlite/migrations/2024-04-01-121212_add_sidechain_id/down.sql 
b/dan_layer/storage_sqlite/migrations/2024-04-01-121212_add_sidechain_id/down.sql deleted file mode 100644 index e69de29bb..000000000 diff --git a/dan_layer/storage_sqlite/migrations/2024-04-01-121212_add_sidechain_id/up.sql b/dan_layer/storage_sqlite/migrations/2024-04-01-121212_add_sidechain_id/up.sql deleted file mode 100644 index e6b11222d..000000000 --- a/dan_layer/storage_sqlite/migrations/2024-04-01-121212_add_sidechain_id/up.sql +++ /dev/null @@ -1,7 +0,0 @@ --- Your SQL goes here -ALTER TABLE validator_nodes - ADD COLUMN sidechain_id BLOB NOT NULL; - --- drop index validator_nodes_public_key_uniq_idx; - --- create unique index validator_nodes_public_key_uniq_idx on validator_nodes (public_key, sidechain_id); \ No newline at end of file diff --git a/dan_layer/storage_sqlite/migrations/2024-04-12-000000_create_committes/down.sql b/dan_layer/storage_sqlite/migrations/2024-04-12-000000_create_committes/down.sql deleted file mode 100644 index e69de29bb..000000000 diff --git a/dan_layer/storage_sqlite/migrations/2024-04-12-000000_create_committes/up.sql b/dan_layer/storage_sqlite/migrations/2024-04-12-000000_create_committes/up.sql deleted file mode 100644 index 54fb867fb..000000000 --- a/dan_layer/storage_sqlite/migrations/2024-04-12-000000_create_committes/up.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE committees -( - id INTEGER PRIMARY KEY autoincrement NOT NULL, - validator_node_id INTEGER NOT NULL, - epoch BIGINT NOT NULL, - shard_start INTEGER NOT NULL, - shard_end INTEGER NOT NULL, - FOREIGN KEY (validator_node_id) REFERENCES validator_nodes (id) ON DELETE CASCADE -); - -CREATE INDEX committees_validator_node_id_epoch_index ON committees (validator_node_id, epoch); diff --git a/dan_layer/storage_sqlite/src/global/backend_adapter.rs b/dan_layer/storage_sqlite/src/global/backend_adapter.rs index 0a8aeb7a7..369d4e34d 100644 --- a/dan_layer/storage_sqlite/src/global/backend_adapter.rs +++ b/dan_layer/storage_sqlite/src/global/backend_adapter.rs @@ -25,22 +25,22 @@ use std::{ convert::{TryFrom, TryInto}, fmt::{Debug, Formatter}, marker::PhantomData, - ops::RangeInclusive, sync::{Arc, Mutex}, }; use diesel::{ sql_query, sql_types::{BigInt, Bigint}, + BoolExpressionMethods, ExpressionMethods, JoinOnDsl, - NullableExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection, }; use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; +use log::debug; use serde::{de::DeserializeOwned, Serialize}; use tari_common_types::types::{FixedHash, PublicKey}; use tari_dan_common_types::{ @@ -56,6 +56,7 @@ use tari_dan_storage::{ models::ValidatorNode, DbBaseLayerBlockInfo, DbEpoch, + DbLayer1Transaction, DbTemplate, DbTemplateUpdate, GlobalDbAdapter, @@ -79,12 +80,14 @@ use crate::{ TemplateModel, TemplateUpdateModel, }, - schema::{templates, validator_nodes::dsl::validator_nodes}, + schema::templates, serialization::serialize_json, }, SqliteTransaction, }; +const LOG_TARGET: &str = "tari::dan::storage_sqlite::global::backend_adapter"; + define_sql_function! 
{ #[sql_name = "COALESCE"] fn coalesce_bigint(x: diesel::sql_types::Nullable, y: BigInt) -> BigInt; @@ -366,10 +369,8 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { address: Self::Addr, public_key: PublicKey, shard_key: SubstateAddress, - registered_at_base_height: u64, start_epoch: Epoch, fee_claim_public_key: PublicKey, - sidechain_id: Option, ) -> Result<(), Self::Error> { use crate::global::schema::validator_nodes; let addr = serialize_json(&address)?; @@ -379,10 +380,8 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { validator_nodes::address.eq(&addr), validator_nodes::public_key.eq(ByteArray::as_bytes(&public_key)), validator_nodes::shard_key.eq(shard_key.as_bytes()), - validator_nodes::registered_at_base_height.eq(registered_at_base_height as i64), validator_nodes::start_epoch.eq(start_epoch.as_u64() as i64), validator_nodes::fee_claim_public_key.eq(ByteArray::as_bytes(&fee_claim_public_key)), - validator_nodes::sidechain_id.eq(sidechain_id.as_ref().map(|id| id.as_bytes()).unwrap_or(&[0u8; 32])), )) .execute(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { @@ -393,26 +392,22 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { Ok(()) } - fn remove_validator_node( + fn deactivate_validator_node( &self, tx: &mut Self::DbTransaction<'_>, public_key: PublicKey, - sidechain_id: Option, + deactivation_epoch: Epoch, ) -> Result<(), Self::Error> { use crate::global::schema::validator_nodes; - diesel::delete( - validator_nodes - .filter( - validator_nodes::sidechain_id - .eq(sidechain_id.as_ref().map(|id| id.as_bytes()).unwrap_or(&[0u8; 32])), - ) - .filter(validator_nodes::public_key.eq(ByteArray::as_bytes(&public_key))), - ) - .execute(tx.connection()) - .map_err(|source| SqliteStorageError::DieselError { - source, - operation: "remove::validator_nodes".to_string(), - })?; + + diesel::update(validator_nodes::table) + .set(validator_nodes::end_epoch.eq(deactivation_epoch.as_u64() as i64)) + .filter(validator_nodes::public_key.eq(ByteArray::as_bytes(&public_key))) + .execute(tx.connection()) + .map_err(|source| SqliteStorageError::DieselError { + source, + operation: "remove::validator_nodes".to_string(), + })?; Ok(()) } @@ -422,25 +417,15 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { tx: &mut Self::DbTransaction<'_>, epoch: Epoch, address: &Self::Addr, - sidechain_id: Option<&PublicKey>, ) -> Result, Self::Error> { - use crate::global::schema::validator_nodes; + use crate::global::schema::{committees, validator_nodes}; let vn = validator_nodes::table - .select(( - validator_nodes::id, - validator_nodes::public_key, - validator_nodes::shard_key, - validator_nodes::registered_at_base_height, - validator_nodes::start_epoch, - validator_nodes::fee_claim_public_key, - validator_nodes::address, - validator_nodes::sidechain_id, - )) - .filter(validator_nodes::start_epoch.le(epoch.as_u64() as i64)) + .select(validator_nodes::all_columns) + .inner_join(committees::table.on(validator_nodes::id.eq(committees::validator_node_id))) + .filter(committees::epoch.eq(epoch.as_u64() as i64)) .filter(validator_nodes::address.eq(serialize_json(address)?)) - .filter(validator_nodes::sidechain_id.eq(sidechain_id.map(ByteArray::as_bytes).unwrap_or(&[0u8; 32]))) - .order_by(validator_nodes::registered_at_base_height.desc()) + .order_by(validator_nodes::id.desc()) .first::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -456,15 +441,15 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { tx: &mut Self::DbTransaction<'_>, epoch: Epoch, public_key: 
&PublicKey, - sidechain_id: Option<&PublicKey>, ) -> Result, Self::Error> { - use crate::global::schema::validator_nodes; + use crate::global::schema::{committees, validator_nodes}; let vn = validator_nodes::table - .filter(validator_nodes::start_epoch.le(epoch.as_u64() as i64)) + .select(validator_nodes::all_columns) + .inner_join(committees::table.on(validator_nodes::id.eq(committees::validator_node_id))) + .filter(committees::epoch.eq(epoch.as_u64() as i64)) .filter(validator_nodes::public_key.eq(ByteArray::as_bytes(public_key))) - .filter(validator_nodes::sidechain_id.eq(sidechain_id.map(ByteArray::as_bytes).unwrap_or(&[0u8; 32]))) - .order_by(validator_nodes::registered_at_base_height.desc()) + .order_by(validator_nodes::shard_key.desc()) .first::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -475,45 +460,17 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { Ok(vn) } - fn validator_nodes_count( - &self, - tx: &mut Self::DbTransaction<'_>, - epoch: Epoch, - sidechain_id: Option<&PublicKey>, - ) -> Result { - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); - + fn validator_nodes_count(&self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch) -> Result { let count = sql_query( - "SELECT COUNT(distinct public_key) as cnt FROM validator_nodes WHERE start_epoch <= ? AND sidechain_id = ?", + "SELECT COUNT(distinct public_key) as cnt FROM validator_nodes WHERE start_epoch <= ? AND (end_epoch IS \ + NULL OR end_epoch > ?)", ) .bind::(epoch.as_u64() as i64) - .bind::(db_sidechain_id) - .get_result::(tx.connection()) - .map_err(|source| SqliteStorageError::DieselError { - source, - operation: "count_validator_nodes".to_string(), - })?; - - Ok(count.cnt as u64) - } - - fn validator_nodes_count_by_start_epoch( - &self, - tx: &mut Self::DbTransaction<'_>, - epoch: Epoch, - sidechain_id: Option<&PublicKey>, - ) -> Result { - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); - - let count = sql_query( - "SELECT COUNT(distinct public_key) as cnt FROM validator_nodes WHERE start_epoch = ? 
AND sidechain_id = ?", - ) .bind::(epoch.as_u64() as i64) - .bind::(db_sidechain_id) .get_result::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, - operation: "count_validator_nodes_by_start_epoch".to_string(), + operation: "count_validator_nodes".to_string(), })?; Ok(count.cnt as u64) @@ -523,20 +480,15 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, shard_group: ShardGroup, ) -> Result { - use crate::global::schema::{committees, validator_nodes}; + use crate::global::schema::committees; - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); let count = committees::table - .inner_join(validator_nodes::table.on(committees::validator_node_id.eq(validator_nodes::id))) .filter(committees::epoch.eq(epoch.as_u64() as i64)) .filter(committees::shard_start.eq(shard_group.start().as_u32() as i32)) .filter(committees::shard_end.eq(shard_group.end().as_u32() as i32)) - .filter(validator_nodes::sidechain_id.eq(db_sidechain_id)) .count() - .limit(1) .get_result::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -550,11 +502,9 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, Self::Error> { use crate::global::schema::{committees, validator_nodes}; - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); let results = committees::table .inner_join(validator_nodes::table.on(committees::validator_node_id.eq(validator_nodes::id))) .select(( @@ -564,7 +514,6 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { validator_nodes::public_key, )) .filter(committees::epoch.eq(epoch.as_u64() as i64)) - .filter(validator_nodes::sidechain_id.eq(db_sidechain_id)) .load::<(i32, i32, String, Vec)>(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -591,19 +540,16 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { tx: &mut Self::DbTransaction<'_>, shard_key: SubstateAddress, shard_group: ShardGroup, - sidechain_id: Option<&PublicKey>, epoch: Epoch, ) -> Result<(), Self::Error> { use crate::global::schema::{committees, validator_nodes}; - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); // This is probably not the most robust way of doing this. Ideally you would pass the validator ID to the // function and use that to insert into the committees table. let validator_id = validator_nodes::table .select(validator_nodes::id) .filter(validator_nodes::shard_key.eq(shard_key.as_bytes())) .filter(validator_nodes::start_epoch.le(epoch.as_u64() as i64)) - .filter(validator_nodes::sidechain_id.eq(db_sidechain_id)) - .order_by(validator_nodes::registered_at_base_height.desc()) + .order_by(validator_nodes::id.desc()) .first::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -625,45 +571,45 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { Ok(()) } - fn validator_nodes_get_by_substate_range( + fn validator_nodes_get_for_shard_group( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, - shard_range: RangeInclusive, - ) -> Result>, Self::Error> { - // TODO: is this method still needed? 
Most of this can be handled by the committees table - use crate::global::schema::validator_nodes; + shard_group: ShardGroup, + ) -> Result, Self::Error> { + use crate::global::schema::{committees, validator_nodes}; - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); let validators = validator_nodes::table - .select(( - validator_nodes::id, - validator_nodes::public_key, - validator_nodes::shard_key, - validator_nodes::registered_at_base_height, - validator_nodes::start_epoch, - validator_nodes::fee_claim_public_key, - validator_nodes::address, - validator_nodes::sidechain_id - )) - .filter(validator_nodes::start_epoch.le(epoch.as_u64() as i64)) - // SQLite compares BLOB types using memcmp which, IIRC, compares bytes "left to right"/big-endian which is - // the same way convert shard IDs to 256-bit integers when allocating committee shards. - .filter(validator_nodes::shard_key.ge(shard_range.start().as_bytes())) - .filter(validator_nodes::shard_key.le(shard_range.end().as_bytes())) - .filter(validator_nodes::sidechain_id.eq(db_sidechain_id)) - .order_by(validator_nodes::shard_key.asc()) + .inner_join(committees::table.on(committees::validator_node_id.eq(validator_nodes::id))) + .select(validator_nodes::all_columns) + .filter(committees::epoch.eq(epoch.as_u64() as i64)) + .filter(committees::shard_start.eq(shard_group.start().as_u32() as i32)) + .filter(committees::shard_end.eq(shard_group.end().as_u32() as i32)) .get_results::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, - operation: "validator_nodes_get_by_shard_range".to_string(), + operation: "validator_nodes_get_for_shard_group".to_string(), })?; - distinct_validators_sorted(validators) + debug!(target: LOG_TARGET, "Found {} validators", validators.len()); + + validators + .into_iter() + .map(|vn| { + Ok(( + DbValidatorNode::try_parse_address(&vn.address)?, + PublicKey::from_canonical_bytes(&vn.public_key).map_err(|_| { + SqliteStorageError::MalformedDbData(format!( + "validator_nodes_get_for_shard_group: Invalid public key in validator node record id={}", + vn.id + )) + })?, + )) + }) + .collect() } - fn validator_nodes_get_for_shard_group( + fn validator_nodes_get_overlapping_shard_group( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, @@ -671,21 +617,23 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { ) -> Result>, Self::Error> { use crate::global::schema::{committees, validator_nodes}; - let mut committees = HashMap::with_capacity(shard_group.len()); let validators = validator_nodes::table - .left_join(committees::table.on(committees::validator_node_id.eq(validator_nodes::id))) - .select((validator_nodes::all_columns, committees::all_columns.nullable())) + .inner_join(committees::table.on(committees::validator_node_id.eq(validator_nodes::id))) + .select((validator_nodes::all_columns, committees::all_columns)) .filter(committees::epoch.eq(epoch.as_u64() as i64)) - .filter(committees::shard_start.le(shard_group.start().as_u32() as i32)) - .filter(committees::shard_end.ge(shard_group.end().as_u32() as i32)) - .get_results::<(DbValidatorNode, Option)>(tx.connection()) + // Overlapping c.shard_start <= :end and c.shard_end >= :start; + .filter(committees::shard_start.le(shard_group.end().as_u32() as i32)) + .filter(committees::shard_end.ge(shard_group.start().as_u32() as i32)) + .get_results::<(DbValidatorNode, DbCommittee)>(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, - operation: "validator_nodes_get_by_buckets".to_string(), + 
operation: "validator_nodes_get_overlapping_shard_group".to_string(), })?; + debug!(target: LOG_TARGET, "Found {} validators", validators.len()); + + let mut committees = HashMap::with_capacity(shard_group.len()); for (vn, committee) in validators { - let committee = committee.unwrap(); let validators = committees .entry(committee.as_shard_group()) .or_insert_with(|| Committee::empty()); @@ -694,7 +642,8 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { DbValidatorNode::try_parse_address(&vn.address)?, PublicKey::from_canonical_bytes(&vn.public_key).map_err(|_| { SqliteStorageError::MalformedDbData(format!( - "Invalid public key in validator node record id={}", + "validator_nodes_get_overlapping_shard_group: Invalid public key in validator node record \ + id={}", vn.id )) })?, @@ -704,28 +653,20 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { Ok(committees) } - fn get_validator_nodes_within_epoch( + fn get_validator_nodes_within_start_epoch( &self, tx: &mut Self::DbTransaction<'_>, epoch: Epoch, - sidechain_id: Option<&PublicKey>, ) -> Result>, Self::Error> { use crate::global::schema::validator_nodes; - let db_sidechain_id = sidechain_id.map(|id| id.as_bytes()).unwrap_or(&[0u8; 32]); let sqlite_vns = validator_nodes::table - .select(( - validator_nodes::id, - validator_nodes::public_key, - validator_nodes::shard_key, - validator_nodes::registered_at_base_height, - validator_nodes::start_epoch, - validator_nodes::fee_claim_public_key, - validator_nodes::address, - validator_nodes::sidechain_id, - )) .filter(validator_nodes::start_epoch.le(epoch.as_u64() as i64)) - .filter(validator_nodes::sidechain_id.eq(db_sidechain_id)) + .filter( + validator_nodes::end_epoch + .is_null() + .or(validator_nodes::end_epoch.gt(epoch.as_u64() as i64)), + ) .get_results::(tx.connection()) .map_err(|source| SqliteStorageError::DieselError { source, @@ -735,6 +676,27 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { distinct_validators_sorted(sqlite_vns) } + fn get_validator_nodes_within_committee_epoch( + &self, + tx: &mut Self::DbTransaction<'_>, + epoch: Epoch, + ) -> Result>, Self::Error> { + use crate::global::schema::{committees, validator_nodes}; + + let sqlite_vns = validator_nodes::table + .select(validator_nodes::all_columns) + .inner_join(committees::table.on(validator_nodes::id.eq(committees::validator_node_id))) + .filter(committees::epoch.eq(epoch.as_u64() as i64)) + .order_by(validator_nodes::shard_key.asc()) + .get_results::(tx.connection()) + .map_err(|source| SqliteStorageError::DieselError { + source, + operation: format!("get::get_validator_nodes_within_epochs({})", epoch), + })?; + + sqlite_vns.into_iter().map(TryInto::try_into).collect() + } + fn insert_epoch(&self, tx: &mut Self::DbTransaction<'_>, epoch: DbEpoch) -> Result<(), Self::Error> { use crate::global::schema::epochs; @@ -851,6 +813,28 @@ impl GlobalDbAdapter for SqliteGlobalDbAdapter { None => Ok(None), } } + + fn insert_layer_one_transaction( + &self, + tx: &mut Self::DbTransaction<'_>, + data: DbLayer1Transaction, + ) -> Result<(), Self::Error> { + use crate::global::schema::layer_one_transactions; + + diesel::insert_into(layer_one_transactions::table) + .values(( + layer_one_transactions::epoch.eq(data.epoch.as_u64() as i64), + layer_one_transactions::payload_type.eq(data.proof_type.to_string()), + layer_one_transactions::payload.eq(serde_json::to_string_pretty(&data.payload)?), + )) + .execute(tx.connection()) + .map_err(|source| SqliteStorageError::DieselError { + source, + operation: 
"insert::layer_one_transaction".to_string(), + })?; + + Ok(()) + } } impl Debug for SqliteGlobalDbAdapter { @@ -875,7 +859,7 @@ fn distinct_validators( ) -> Result>, SqliteStorageError> { // first, sort by registration block height so that we get newer registrations first let mut db_vns = Vec::with_capacity(sqlite_vns.len()); - sqlite_vns.sort_by(|a, b| a.registered_at_base_height.cmp(&b.registered_at_base_height).reverse()); + sqlite_vns.sort_by(|a, b| a.start_epoch.cmp(&b.start_epoch).reverse()); let mut dedup_map = HashSet::>::with_capacity(sqlite_vns.len()); for vn in sqlite_vns { if !dedup_map.contains(&vn.public_key) { diff --git a/dan_layer/storage_sqlite/src/global/models/validator_node.rs b/dan_layer/storage_sqlite/src/global/models/validator_node.rs index df882967a..a286d0531 100644 --- a/dan_layer/storage_sqlite/src/global/models/validator_node.rs +++ b/dan_layer/storage_sqlite/src/global/models/validator_node.rs @@ -35,12 +35,11 @@ use crate::{ pub struct DbValidatorNode { pub id: i32, pub public_key: Vec, + pub address: String, pub shard_key: Vec, - pub registered_at_base_height: i64, pub start_epoch: i64, + pub end_epoch: Option, pub fee_claim_public_key: Vec, - pub address: String, - pub sidechain_id: Vec, } impl TryFrom for ValidatorNode { type Error = SqliteStorageError; @@ -54,24 +53,14 @@ impl TryFrom for ValidatorNode { public_key: PublicKey::from_canonical_bytes(&vn.public_key).map_err(|_| { SqliteStorageError::MalformedDbData(format!("Invalid public key in validator node record id={}", vn.id)) })?, - registered_at_base_height: vn.registered_at_base_height as u64, start_epoch: Epoch(vn.start_epoch as u64), + end_epoch: vn.end_epoch.map(|e| Epoch(e as u64)), fee_claim_public_key: PublicKey::from_canonical_bytes(&vn.fee_claim_public_key).map_err(|_| { SqliteStorageError::MalformedDbData(format!( "Invalid fee claim public key in validator node record id={}", vn.id )) })?, - sidechain_id: if vn.sidechain_id == [0u8; 32] { - None - } else { - Some(PublicKey::from_canonical_bytes(&vn.sidechain_id).map_err(|_| { - SqliteStorageError::MalformedDbData(format!( - "Invalid sidechain id in validator node record id={}", - vn.id - )) - })?) - }, }) } } diff --git a/dan_layer/storage_sqlite/src/global/schema.rs b/dan_layer/storage_sqlite/src/global/schema.rs index e3cc6ffb8..ac98da8e3 100644 --- a/dan_layer/storage_sqlite/src/global/schema.rs +++ b/dan_layer/storage_sqlite/src/global/schema.rs @@ -31,6 +31,17 @@ diesel::table! { } } +diesel::table! { + layer_one_transactions (id) { + id -> Integer, + epoch -> BigInt, + payload_type -> Text, + payload -> Text, + submitted_at -> Nullable, + is_observed -> Bool, + } +} + diesel::table! { metadata (key_name) { key_name -> Binary, @@ -60,12 +71,11 @@ diesel::table! 
{ validator_nodes (id) { id -> Integer, public_key -> Binary, + address -> Text, shard_key -> Binary, - registered_at_base_height -> BigInt, start_epoch -> BigInt, + end_epoch -> Nullable, fee_claim_public_key -> Binary, - address -> Text, - sidechain_id -> Binary, } } @@ -76,6 +86,7 @@ diesel::allow_tables_to_appear_in_same_query!( bmt_cache, committees, epochs, + layer_one_transactions, metadata, templates, validator_nodes, diff --git a/dan_layer/storage_sqlite/tests/global_db.rs b/dan_layer/storage_sqlite/tests/global_db.rs index aa91c3570..0bbbfeeb3 100644 --- a/dan_layer/storage_sqlite/tests/global_db.rs +++ b/dan_layer/storage_sqlite/tests/global_db.rs @@ -5,7 +5,7 @@ use diesel::{Connection, SqliteConnection}; use rand::rngs::OsRng; use tari_common_types::types::{FixedHash, PublicKey}; use tari_crypto::keys::PublicKey as _; -use tari_dan_common_types::{Epoch, PeerAddress, ShardGroup, SubstateAddress}; +use tari_dan_common_types::{Epoch, NumPreshards, PeerAddress, ShardGroup, SubstateAddress}; use tari_dan_storage::global::{GlobalDb, ValidatorNodeDb}; use tari_dan_storage_sqlite::global::SqliteGlobalDbAdapter; use tari_utilities::ByteArray; @@ -32,10 +32,11 @@ fn insert_vns( validator_nodes: &mut ValidatorNodeDb<'_, '_, SqliteGlobalDbAdapter>, num: usize, epoch: Epoch, - sidechain_id: Option, ) { for _ in 0..num { - insert_vn_with_public_key(validator_nodes, new_public_key(), epoch, sidechain_id.clone()) + let pk = new_public_key(); + insert_vn_with_public_key(validator_nodes, pk.clone(), epoch); + set_committee_shard_group(validator_nodes, &pk, ShardGroup::all_shards(NumPreshards::P256), epoch); } } @@ -43,17 +44,14 @@ fn insert_vn_with_public_key( validator_nodes: &mut ValidatorNodeDb<'_, '_, SqliteGlobalDbAdapter>, public_key: PublicKey, start_epoch: Epoch, - sidechain_id: Option, ) { validator_nodes .insert_validator_node( public_key.clone().into(), public_key.clone(), derived_substate_address(&public_key), - 0, start_epoch, public_key, - sidechain_id, ) .unwrap() } @@ -65,7 +63,7 @@ fn set_committee_shard_group( epoch: Epoch, ) { validator_nodes - .set_committee_shard(derived_substate_address(public_key), shard_group, None, epoch) + .set_committee_shard(derived_substate_address(public_key), shard_group, epoch) .unwrap(); } @@ -74,9 +72,9 @@ fn insert_and_get_within_epoch() { let db = create_db(); let mut tx = db.create_transaction().unwrap(); let mut validator_nodes = db.validator_nodes(&mut tx); - insert_vns(&mut validator_nodes, 3, Epoch(0), None); - insert_vns(&mut validator_nodes, 2, Epoch(1), None); - let vns = validator_nodes.get_all_within_epoch(Epoch(0), None).unwrap(); + insert_vns(&mut validator_nodes, 3, Epoch(0)); + insert_vns(&mut validator_nodes, 2, Epoch(1)); + let vns = validator_nodes.get_all_registered_within_start_epoch(Epoch(0)).unwrap(); assert_eq!(vns.len(), 3); } @@ -86,14 +84,22 @@ fn change_committee_shard_group() { let mut tx = db.create_transaction().unwrap(); let mut validator_nodes = db.validator_nodes(&mut tx); let pk = new_public_key(); - insert_vn_with_public_key(&mut validator_nodes, pk.clone(), Epoch(0), None); + insert_vn_with_public_key(&mut validator_nodes, pk.clone(), Epoch(0)); set_committee_shard_group(&mut validator_nodes, &pk, ShardGroup::new(1, 2), Epoch(0)); + let count = validator_nodes.count(Epoch(0)).unwrap(); + assert_eq!(count, 1); set_committee_shard_group(&mut validator_nodes, &pk, ShardGroup::new(3, 4), Epoch(1)); set_committee_shard_group(&mut validator_nodes, &pk, ShardGroup::new(7, 8), Epoch(2)); 
set_committee_shard_group(&mut validator_nodes, &pk, ShardGroup::new(4, 5), Epoch(3)); - set_committee_shard_group(&mut validator_nodes, &pk, ShardGroup::new(4, 5), Epoch(3)); + let pk2 = new_public_key(); + insert_vn_with_public_key(&mut validator_nodes, pk2.clone(), Epoch(3)); + set_committee_shard_group(&mut validator_nodes, &pk2, ShardGroup::new(4, 5), Epoch(3)); + let count = validator_nodes.count(Epoch(0)).unwrap(); + assert_eq!(count, 1); + let count = validator_nodes.count(Epoch(3)).unwrap(); + assert_eq!(count, 2); let vns = validator_nodes - .get_committees_for_shard_group(Epoch(3), ShardGroup::new(4, 5)) + .get_committee_for_shard_group(Epoch(3), ShardGroup::new(4, 5)) .unwrap(); - assert_eq!(vns.get(&ShardGroup::new(4, 5)).unwrap().len(), 2); + assert_eq!(vns.len(), 2); } diff --git a/dan_layer/template_lib/src/models/layer_one_commitment.rs b/dan_layer/template_lib/src/models/layer_one_commitment.rs index de5a7f67d..a63310cab 100644 --- a/dan_layer/template_lib/src/models/layer_one_commitment.rs +++ b/dan_layer/template_lib/src/models/layer_one_commitment.rs @@ -37,8 +37,12 @@ impl UnclaimedConfidentialOutputAddress { &self.0 } - pub fn to_vec(&self) -> Vec { - self.0.to_vec() + pub fn from_bytes(bytes: &[u8]) -> Result { + Ok(Self(BorTag::new(ObjectKey::try_from(bytes)?))) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.inner() } } @@ -52,7 +56,7 @@ impl TryFrom<&[u8]> for UnclaimedConfidentialOutputAddress { type Error = KeyParseError; fn try_from(value: &[u8]) -> Result { - Ok(Self(BorTag::new(ObjectKey::try_from(value)?))) + Self::from_bytes(value) } } diff --git a/dan_layer/template_lib/src/models/non_fungible_index.rs b/dan_layer/template_lib/src/models/non_fungible_index.rs index 64b9aee91..bd079d493 100644 --- a/dan_layer/template_lib/src/models/non_fungible_index.rs +++ b/dan_layer/template_lib/src/models/non_fungible_index.rs @@ -24,14 +24,16 @@ use std::{error::Error, str::FromStr}; use serde::{Deserialize, Serialize}; use tari_template_abi::rust::{fmt, fmt::Display}; -#[cfg(feature = "ts")] -use ts_rs::TS; use super::ResourceAddress; /// The unique identifier of a non-fungible index in the Tari network #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[cfg_attr(feature = "ts", derive(TS), ts(export, export_to = "../../bindings/src/types/"))] +#[cfg_attr( + feature = "ts", + derive(ts_rs::TS), + ts(export, export_to = "../../bindings/src/types/") +)] pub struct NonFungibleIndexAddress { resource_address: ResourceAddress, #[cfg_attr(feature = "ts", ts(type = "number"))] diff --git a/dan_layer/transaction/Cargo.toml b/dan_layer/transaction/Cargo.toml index e7876dcfa..0673b65c3 100644 --- a/dan_layer/transaction/Cargo.toml +++ b/dan_layer/transaction/Cargo.toml @@ -14,6 +14,7 @@ tari_dan_common_types = { workspace = true } tari_crypto = { workspace = true, features = ["borsh"] } tari_template_lib = { workspace = true } +borsh = { workspace = true } rand = { workspace = true } indexmap = { workspace = true, features = ["serde"] } serde = { workspace = true, default-features = true } diff --git a/dan_layer/transaction/src/transaction_id.rs b/dan_layer/transaction/src/transaction_id.rs index d5dc8ff56..78aec77e9 100644 --- a/dan_layer/transaction/src/transaction_id.rs +++ b/dan_layer/transaction/src/transaction_id.rs @@ -6,6 +6,7 @@ use std::{ fmt::{Display, Formatter}, }; +use borsh::BorshSerialize; use serde::{Deserialize, Serialize}; use tari_common_types::types::FixedHashSizeError; use 
 use tari_crypto::tari_utilities::hex::{from_hex, Hex};
@@ -13,7 +14,7 @@ use tari_dan_common_types::{SubstateAddress, ToSubstateAddress};
 use tari_engine_types::{serde_with, transaction_receipt::TransactionReceiptAddress};
 use tari_template_lib::Hash;
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize, Default)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize, Default, BorshSerialize)]
 #[serde(transparent)]
 pub struct TransactionId {
     #[serde(with = "serde_with::hex")]
diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml
index adc217766..ad168a7e1 100644
--- a/integration_tests/Cargo.toml
+++ b/integration_tests/Cargo.toml
@@ -19,6 +19,7 @@ minotari_console_wallet = { workspace = true, features = ["grpc"] }
 minotari_wallet = { workspace = true }
 tari_p2p = { workspace = true }
 tari_shutdown = { workspace = true }
+tari_sidechain = { workspace = true }
 tari_crypto = { workspace = true }
 tari_indexer = { workspace = true }
@@ -62,6 +63,7 @@ time = { workspace = true }
 tokio = { workspace = true, features = ["default", "macros", "time", "sync", "rt-multi-thread", "signal"] }
 tonic = { workspace = true }
 regex = "1.11.0"
+notify = "7.0.0"
 [[test]]
 name = "cucumber" # this should be the same as the filename of your test target
diff --git a/integration_tests/src/indexer.rs b/integration_tests/src/indexer.rs
index 1563aaa0b..fbfb901f3 100644
--- a/integration_tests/src/indexer.rs
+++ b/integration_tests/src/indexer.rs
@@ -154,7 +154,7 @@ pub async fn spawn_indexer(world: &mut TariWorld, indexer_name: String, base_nod
     // we need to add all the validator nodes as seed peers
     let peer_seeds: Vec<String> = world
-        .all_validators_iter()
+        .all_running_validators_iter()
         .map(|vn| format!("{}::/ip4/127.0.0.1/tcp/{}", vn.public_key, vn.port))
         .collect();
diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs
index b375fffbf..093433a59 100644
--- a/integration_tests/src/lib.rs
+++ b/integration_tests/src/lib.rs
@@ -21,6 +21,7 @@
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 use std::{
+    collections::HashMap,
     fmt::{Debug, Formatter},
     fs,
     time::{Duration, Instant},
@@ -47,6 +48,7 @@ use tari_crypto::{
     ristretto::{RistrettoComSig, RistrettoSecretKey},
 };
 use tari_dan_common_types::SubstateRequirement;
+use tari_sidechain::EvictionProof;
 use template::RegisteredTemplate;
 use validator_node::ValidatorNodeProcess;
 use wallet::WalletProcess;
@@ -96,6 +98,7 @@ pub struct TariWorld {
     /// A receiver wallet address that is used for default one-sided coinbase payments
     pub default_payment_address: TariAddress,
     pub consensus_manager: ConsensusManager,
+    pub eviction_proofs: HashMap<String, EvictionProof>,
 }
 impl TariWorld {
@@ -155,8 +158,18 @@ impl TariWorld {
             .unwrap_or_else(|| panic!("Validator node {} not found", name))
     }
-    pub fn all_validators_iter(&self) -> impl Iterator<Item = &ValidatorNodeProcess> {
-        self.validator_nodes.values().chain(self.vn_seeds.values())
+    pub fn get_validator_node_mut(&mut self, name: &str) -> &mut ValidatorNodeProcess {
+        self.validator_nodes
+            .get_mut(name)
+            .or_else(|| self.vn_seeds.get_mut(name))
+            .unwrap_or_else(|| panic!("Validator node {} not found", name))
+    }
+
+    pub fn all_running_validators_iter(&self) -> impl Iterator<Item = &ValidatorNodeProcess> + Clone {
+        self.validator_nodes
+            .values()
+            .chain(self.vn_seeds.values())
+            .filter(|vn| !vn.handle.is_finished())
     }
     pub fn get_indexer(&self, name: &str) -> &IndexerProcess {
@@ -179,6 +192,11 @@ impl TariWorld {
         all_components.get("components/Account").cloned()
     }
+    pub fn add_eviction_proof<T: Into<String>>(&mut self, name: T, eviction_proof: EvictionProof) -> &mut Self {
+        self.eviction_proofs.insert(name.into(), eviction_proof);
+        self
+    }
+
     pub fn after(&mut self, _scenario: &Scenario) {
         let _drop = self.http_server.take();
@@ -287,6 +305,7 @@ impl Default for TariWorld {
             wallet_private_key,
             default_payment_address,
             consensus_manager: ConsensusManager::builder(Network::LocalNet).build().unwrap(),
+            eviction_proofs: HashMap::new(),
         }
     }
 }
diff --git a/integration_tests/src/validator_node.rs b/integration_tests/src/validator_node.rs
index ac482fcf1..dd51a6741 100644
--- a/integration_tests/src/validator_node.rs
+++ b/integration_tests/src/validator_node.rs
@@ -62,6 +62,10 @@ impl ValidatorNodeProcess {
         get_vn_client(self.json_rpc_port)
     }
+    pub fn layer_one_transaction_path(&self) -> PathBuf {
+        self.temp_dir_path.join("data/layer_one_transactions")
+    }
+
     pub async fn save_database(&self, database_name: String, to: &Path) {
         fs::create_dir_all(to).expect("Could not create directory");
         let from = &self.temp_dir_path.join(format!("{}.db", database_name));
@@ -119,6 +123,7 @@ pub async fn spawn_validator_node(
         world.current_scenario_name.as_ref().unwrap(),
         &validator_node_name,
     );
+    // Connect to shard db
     let temp_dir_path = temp_dir.clone();
     let handle = task::spawn(async move {
         let mut config = ApplicationConfig {
diff --git a/integration_tests/tests/cucumber.rs b/integration_tests/tests/cucumber.rs
index e90399095..8298168d4 100644
--- a/integration_tests/tests/cucumber.rs
+++ b/integration_tests/tests/cucumber.rs
@@ -128,9 +128,10 @@ async fn main() {
     shutdown.trigger();
 }
+#[then(expr = "I stop validator node {word}")]
 #[when(expr = "I stop validator node {word}")]
 async fn stop_validator_node(world: &mut TariWorld, vn_name: String) {
-    let vn_ps = world.validator_nodes.get_mut(&vn_name).unwrap();
+    let vn_ps = world.get_validator_node_mut(&vn_name);
     vn_ps.stop();
 }
@@ -569,6 +570,7 @@ async fn create_transaction_signing_key(world: &mut TariWorld, name: String) {
     validator_node_cli::create_or_use_key(world, name);
 }
+#[then(expr = "I create an account {word} on {word}")]
 #[when(expr = "I create an account {word} on {word}")]
 async fn create_account(world: &mut TariWorld, account_name: String, vn_name: String) {
     validator_node_cli::create_account(world, account_name, vn_name).await;
diff --git a/integration_tests/tests/features/eviction.feature b/integration_tests/tests/features/eviction.feature
new file mode 100644
index 000000000..3449442cc
--- /dev/null
+++ b/integration_tests/tests/features/eviction.feature
@@ -0,0 +1,63 @@
+# Copyright 2022 The Tari Project
+# SPDX-License-Identifier: BSD-3-Clause
+
+@concurrent
+@eviction
+Feature: Eviction scenarios
+
+  @flaky
+  Scenario: Offline validator gets evicted
+    # Initialize a base node, wallet, miner and several VNs
+    Given a base node BASE
+    Given a wallet WALLET connected to base node BASE
+    Given a miner MINER connected to base node BASE and wallet WALLET
+
+    # Initialize VNs
+    Given a seed validator node VN1 connected to base node BASE and wallet daemon WALLET_D
+    Given a seed validator node VN2 connected to base node BASE and wallet daemon WALLET_D
+    Given a seed validator node VN3 connected to base node BASE and wallet daemon WALLET_D
+    Given a seed validator node VN4 connected to base node BASE and wallet daemon WALLET_D
+    Given a seed validator node VN5 connected to base node BASE and wallet daemon WALLET_D
+
+    # Initialize an indexer
+    Given an indexer IDX connected to base node BASE
+    # Initialize the wallet daemon
+    Given a wallet daemon WALLET_D connected to indexer IDX
+
+    When miner MINER mines 9 new blocks
+    When wallet WALLET has at least 25000 T
+    When validator node VN1 sends a registration transaction to base wallet WALLET
+    When validator node VN2 sends a registration transaction to base wallet WALLET
+    When validator node VN3 sends a registration transaction to base wallet WALLET
+    When validator node VN4 sends a registration transaction to base wallet WALLET
+    When validator node VN5 sends a registration transaction to base wallet WALLET
+
+    When miner MINER mines 26 new blocks
+    Then all validators have scanned to height 32
+    And indexer IDX has scanned to height 32
+    Then all validator nodes are listed as registered
+
+    When indexer IDX connects to all other validators
+
+    When all validator nodes have started epoch 3
+
+    Then I stop validator node VN5
+
+    # Submit some transactions to speed up block production
+    Then I create an account ACC_1 via the wallet daemon WALLET_D with 10000 free coins
+    Then I create an account ACC_2 via the wallet daemon WALLET_D with 10000 free coins
+    Then I create an account ACC_3 via the wallet daemon WALLET_D with 10000 free coins
+    Then I create an account ACC_4 via the wallet daemon WALLET_D with 10000 free coins
+    Then I create an account ACC_5 via the wallet daemon WALLET_D with 10000 free coins
+
+    Then I wait for VN1 to list VN5 as evicted in EVICT_PROOF
+    Then I submit the eviction proof EVICT_PROOF to WALLET
+
+    When miner MINER mines 10 new blocks
+    Then all validators have scanned to height 42
+    # fixme: flaky
+# When all validator nodes have started epoch 4
+# When miner MINER mines 10 new blocks
+# Then all validators have scanned to height 52
+# When all validator nodes have started epoch 5
+# Then validator VN5 is not a member of the current network according to BASE
diff --git a/integration_tests/tests/steps/indexer.rs b/integration_tests/tests/steps/indexer.rs
index 8936eb82b..437b61ff1 100644
--- a/integration_tests/tests/steps/indexer.rs
+++ b/integration_tests/tests/steps/indexer.rs
@@ -18,12 +18,15 @@
 use tari_indexer_client::types::AddPeerRequest;
 #[when(expr = "indexer {word} connects to all other validators")]
 async fn given_validator_connects_to_other_vns(world: &mut TariWorld, name: String) {
     let indexer = world.get_indexer(&name);
-    let details = world.all_validators_iter().filter(|vn| vn.name != name).map(|vn| {
-        (
-            vn.public_key.clone(),
-            Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", vn.port)).unwrap(),
-        )
-    });
+    let details = world
+        .all_running_validators_iter()
+        .filter(|vn| vn.name != name)
+        .map(|vn| {
+            (
+                vn.public_key.clone(),
+                Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", vn.port)).unwrap(),
+            )
+        });
     let mut cli = indexer.get_jrpc_indexer_client();
     for (pk, addr) in details {
diff --git a/integration_tests/tests/steps/validator_node.rs b/integration_tests/tests/steps/validator_node.rs
index 85f3f4ece..4c50b97d1 100644
--- a/integration_tests/tests/steps/validator_node.rs
+++ b/integration_tests/tests/steps/validator_node.rs
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: BSD-3-Clause
 use std::{
+    fs,
     str::FromStr,
     time::{Duration, Instant},
 };
@@ -16,11 +17,14 @@ use integration_tests::{
 };
 use libp2p::Multiaddr;
 use minotari_app_grpc::tari_rpc::{RegisterValidatorNodeRequest, Signature};
+use notify::Watcher;
 use tari_base_node_client::{grpc::GrpcBaseNodeClient, BaseNodeClient};
 use tari_crypto::tari_utilities::ByteArray;
-use tari_dan_common_types::{Epoch, SubstateAddress};
+use tari_dan_common_types::{layer_one_transaction::LayerOneTransactionDef, Epoch, SubstateAddress};
 use tari_engine_types::substate::SubstateId;
+use tari_sidechain::EvictionProof;
 use tari_validator_node_client::types::{AddPeerRequest, GetBlocksRequest, GetStateRequest, GetTemplateRequest};
+use tokio::{sync::mpsc, time::timeout};
 #[given(expr = "a validator node {word} connected to base node {word} and wallet daemon {word}")]
 async fn start_validator_node(world: &mut TariWorld, vn_name: String, bn_name: String, wallet_daemon_name: String) {
@@ -119,7 +123,7 @@ async fn start_multiple_validator_nodes(world: &mut TariWorld, num_nodes: u64, b
 #[given(expr = "validator {word} nodes connect to all other validators")]
 async fn given_validator_connects_to_other_vns(world: &mut TariWorld, name: String) {
     let details = world
-        .all_validators_iter()
+        .all_running_validators_iter()
         .filter(|vn| vn.name != name)
         .map(|vn| {
             (
@@ -205,7 +209,7 @@ async fn register_template(world: &mut TariWorld, wallet_name: String, template_
 #[then(expr = "all validator nodes are listed as registered")]
 async fn assert_all_vns_are_registered(world: &mut TariWorld) {
-    for vn_ps in world.all_validators_iter() {
+    for vn_ps in world.all_running_validators_iter() {
         // create a base node client
         let base_node_grpc_port = vn_ps.base_node_grpc_port;
         let mut base_node_client: GrpcBaseNodeClient = get_base_node_client(base_node_grpc_port);
@@ -299,7 +303,7 @@ async fn assert_template_is_registered_by_all(world: &mut TariWorld, template_na
     // try to get the template for each VN
     let timer = Instant::now();
     'outer: loop {
-        for vn_ps in world.all_validators_iter() {
+        for vn_ps in world.all_running_validators_iter() {
             let mut client = vn_ps.get_client();
             let req = GetTemplateRequest { template_address };
             let resp = client.get_template(req).await.ok();
@@ -411,9 +415,11 @@ async fn vn_has_scanned_to_height(world: &mut TariWorld, vn_name: String, block_
 }
 #[then(expr = "all validators have scanned to height {int}")]
+#[when(expr = "all validators have scanned to height {int}")]
 async fn all_vns_have_scanned_to_height(world: &mut TariWorld, block_height: u64) {
     let all_names = world
-        .all_validators_iter()
+        .all_running_validators_iter()
+        .filter(|vn| !vn.handle.is_finished())
         .map(|vn| vn.name.clone())
         .collect::<Vec<_>>();
     for vn in all_names {
@@ -530,3 +536,108 @@ async fn then_validator_node_switches_epoch(world: &mut TariWorld, vn_name: Stri
     }
     panic!("Validator node {vn_name} did not switch to epoch {epoch}");
 }
+
+#[then(expr = "I wait for {word} to list {word} as evicted in {word}")]
+async fn then_i_wait_for_validator_node_to_be_evicted(
+    world: &mut TariWorld,
+    vn_name: String,
+    evict_vn_name: String,
+    proof_name: String,
+) {
+    let vn = world.get_validator_node(&vn_name);
+    let evict_vn = world.get_validator_node(&evict_vn_name);
+
+    let (tx, mut rx) = mpsc::channel(1);
+    let l1_tx_path = vn.layer_one_transaction_path();
+    fs::create_dir_all(&l1_tx_path).unwrap();
+
+    let mut watcher = notify::RecommendedWatcher::new(
+        move |res| {
+            tx.blocking_send(res).unwrap();
+        },
+        notify::Config::default(),
+    )
+    .unwrap();
+
+    watcher.watch(&l1_tx_path, notify::RecursiveMode::NonRecursive).unwrap();
+
+    loop {
+        let event = timeout(Duration::from_secs(2000), rx.recv())
+            .await
+            .unwrap_or_else(|_| panic!("Timeout waiting for eviction file at path {}", l1_tx_path.display()))
+            .expect("unexpected channel close")
+            .unwrap_or_else(|err| panic!("Error when watching files {err}"));
+
+        if let notify::Event {
+            kind: notify::EventKind::Access(notify::event::AccessKind::Close(notify::event::AccessMode::Write)),
+            paths,
+            ..
+        } = event
+        {
+            if let Some(json_file) = paths
+                .into_iter()
+                .find(|p| p.extension().is_some_and(|ext| ext == "json") && p.is_file())
+            {
+                eprintln!("🗒️ Found file: {}", json_file.display());
+                let contents = fs::read(json_file).expect("Could not read file");
+                let transaction_def = match serde_json::from_slice::<LayerOneTransactionDef<EvictionProof>>(&contents) {
+                    Ok(def) => def,
+                    Err(err) => {
+                        eprintln!("Error deserializing eviction proof: {}", err);
+                        continue;
+                    },
+                };
+                if *transaction_def.payload.node_to_evict() != evict_vn.public_key {
+                    panic!(
+                        "Got an eviction proof for public key {}, however this did not match the public key of \
+                         validator {evict_vn_name}",
+                        transaction_def.payload.node_to_evict()
+                    );
+                }
+                watcher.unwatch(&l1_tx_path).unwrap();
+                world.add_eviction_proof(proof_name.clone(), transaction_def.payload);
+                break;
+            }
+        }
+    }
+}
+
+#[when(expr = "all validator nodes have started epoch {int}")]
+async fn all_validators_have_started_epoch(world: &mut TariWorld, epoch: u64) {
+    let mut remaining_attempts = 60;
+    for vn in world.all_running_validators_iter().cycle() {
+        let mut client = vn.create_client();
+        let status = client.get_consensus_status().await.unwrap();
+        if status.epoch.as_u64() >= epoch {
+            println!(
+                "Validator {} has started epoch {} (consensus state {}, height {})",
+                vn.name, epoch, status.state, status.height
+            );
+            return;
+        }
+        if remaining_attempts == 0 {
+            panic!(
+                "Validator {} did not start epoch {} (at epoch: {}, status: {})",
+                vn.name, epoch, status.epoch, status.state
+            );
+        }
+        remaining_attempts -= 1;
+        tokio::time::sleep(Duration::from_secs(1)).await;
+    }
+}
+
+#[then(expr = "validator {word} is not a member of the current network according to {word}")]
+async fn validator_not_member_of_network(world: &mut TariWorld, validator: String, base_node: String) {
+    let bn = world.get_base_node(&base_node);
+    let vn = world.get_validator_node(&validator);
+    let mut client = bn.create_client();
+    let tip = client.get_tip_info().await.unwrap();
+    let vns = client.get_validator_nodes(tip.height_of_longest_chain).await.unwrap();
+    let has_vn = vns.iter().any(|v| v.public_key == vn.public_key);
+    if has_vn {
+        panic!(
+            "Validator {} is a member of the network but expected it not to be",
+            validator
+        );
+    }
+}
diff --git a/integration_tests/tests/steps/wallet.rs b/integration_tests/tests/steps/wallet.rs
index 9353fb389..20f1b285c 100644
--- a/integration_tests/tests/steps/wallet.rs
+++ b/integration_tests/tests/steps/wallet.rs
@@ -3,8 +3,8 @@
 use std::time::Duration;
-use cucumber::{given, when};
-use minotari_app_grpc::tari_rpc::{GetBalanceRequest, ValidateRequest};
+use cucumber::{given, then, when};
+use minotari_app_grpc::tari_rpc::{GetBalanceRequest, SubmitValidatorEvictionProofRequest, ValidateRequest};
 use tari_common_types::types::{Commitment, PrivateKey, PublicKey};
 use tari_crypto::{ristretto::RistrettoComSig, tari_utilities::ByteArray};
 use tokio::time::sleep;
@@ -108,3 +108,23 @@ pub async fn check_balance(world: &mut TariWorld, wallet_name: String, balance:
         iterations += 1;
     }
 }
+
+#[then(expr = "I submit the eviction proof {word} to {word}")]
+#[when(expr = "I submit the eviction proof {word} to {word}")]
+pub async fn submit_eviction(world: &mut TariWorld, eviction_name: String, wallet_name: String) {
+    let eviction = world
+        .eviction_proofs
+        .get(&eviction_name)
+        .unwrap_or_else(|| panic!("Eviction proof {} not found", eviction_name));
+    let wallet = world.get_wallet(&wallet_name);
+    let mut client = wallet.create_client().await;
+    client
+        .submit_validator_eviction_proof(SubmitValidatorEvictionProofRequest {
+            proof: Some(eviction.into()),
+            fee_per_gram: 1,
+            message: "Eviction proof in cucumber".to_string(),
+            sidechain_deployment_key: vec![],
+        })
+        .await
+        .unwrap();
+}
diff --git a/integration_tests/tests/steps/wallet_daemon.rs b/integration_tests/tests/steps/wallet_daemon.rs
index 5af18fd4b..486515b47 100644
--- a/integration_tests/tests/steps/wallet_daemon.rs
+++ b/integration_tests/tests/steps/wallet_daemon.rs
@@ -165,6 +165,7 @@ async fn when_i_create_transfer_proof_via_wallet_daemon(
         .await;
 }
+#[then(expr = "I create an account {word} via the wallet daemon {word}")]
 #[when(expr = "I create an account {word} via the wallet daemon {word}")]
 async fn when_i_create_account_via_wallet_daemon(
     world: &mut TariWorld,
@@ -174,6 +175,7 @@ async fn when_i_create_account_via_wallet_daemon(
     wallet_daemon_cli::create_account(world, account_name, wallet_daemon_name).await;
 }
+#[then(expr = "I create an account {word} via the wallet daemon {word} with {int} free coins")]
 #[when(expr = "I create an account {word} via the wallet daemon {word} with {int} free coins")]
 async fn when_i_create_account_via_wallet_daemon_with_free_coins(
     world: &mut TariWorld,
@@ -192,6 +194,7 @@ async fn when_i_create_a_wallet_key(world: &mut TariWorld, key_name: String, wal
     world.wallet_keys.insert(key_name, key.id);
 }
+#[then(expr = "I create an account {word} via the wallet daemon {word} with {int} free coins using key {word}")]
 #[when(expr = "I create an account {word} via the wallet daemon {word} with {int} free coins using key {word}")]
 async fn when_i_create_account_via_wallet_daemon_with_free_coins_using_key(
     world: &mut TariWorld,
diff --git a/networking/core/src/peer.rs b/networking/core/src/peer.rs
index e2cd88375..629f08867 100644
--- a/networking/core/src/peer.rs
+++ b/networking/core/src/peer.rs
@@ -36,8 +36,8 @@ impl std::fmt::Display for PeerInfo {
         Ok(())
     }
 }
-fn print_key_value<V: std::fmt::Debug>(k: &str, v: &V, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-    writeln!(f, "{k}: {v:?}")
+fn print_key_value<V: std::fmt::Display>(k: &str, v: &V, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+    writeln!(f, "{k}: {v}")
 }
 fn print_key(k: &str, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     writeln!(f, "{k}:")
diff --git a/networking/core/src/worker.rs b/networking/core/src/worker.rs
index b14ea2035..174172aec 100644
--- a/networking/core/src/worker.rs
+++ b/networking/core/src/worker.rs
@@ -432,7 +432,7 @@ where
             error,
             ..
         } => {
-            warn!(target: LOG_TARGET, "🚨 Outgoing connection error: peer_id={}, error={}", peer_id, error);
+            debug!(target: LOG_TARGET, "🚨 Outgoing connection error: peer_id={}, error={}", peer_id, error);
             let Some(waiters) = self.pending_dial_requests.remove(&peer_id) else {
                 debug!(target: LOG_TARGET, "No pending dial requests initiated by this service for peer {}", peer_id);
                 return Ok(());
@@ -823,7 +823,7 @@ where
         if is_connected_through_relay {
             info!(target: LOG_TARGET, "📡 Peer {} has a p2p-circuit address. Upgrading to DCUtR", peer_id);
             // Ignore as connection failures are logged in events, or an error here is because the peer is
-            // already connected/being dialled
+            // already connected/being dialed
             let _ignore = self
                 .swarm
                 .dial(DialOpts::peer_id(peer_id).addresses(vec![address.clone()]).build());
@@ -840,7 +840,7 @@ where
             }
         }
-        // If this peer is the selected relay that was dialled previously, listen on the circuit address
+        // If this peer is the selected relay that was dialed previously, listen on the circuit address
         // Note we only select a relay if autonat says we are not publicly accessible.
         if is_relay {
             self.establish_relay_circuit_on_connect(&peer_id, connection_id);
@@ -872,7 +872,7 @@ where
             return false;
         };
-        // If the peer we've connected with is the selected relay that we previously dialled, then continue
+        // If the peer we've connected with is the selected relay that we previously dialed, then continue
         if relay.peer_id != *peer_id {
             return false;
         }
@@ -883,18 +883,19 @@ where
         }
         // Check if we've got a confirmed address for the relay
-        let Some(dialled_address) = relay.remote_address.as_ref() else {
+        let Some(remote_address) = relay.remote_address.as_ref() else {
             return false;
         };
-        let circuit_addr = dialled_address.clone().with(Protocol::P2pCircuit);
+        let circuit_addr = remote_address.clone().with(Protocol::P2pCircuit);
         match self.swarm.listen_on(circuit_addr.clone()) {
             Ok(id) => {
+                let local_peer_id = *self.swarm.local_peer_id();
                 self.swarm
                     .behaviour_mut()
                     .peer_sync
-                    .add_known_local_public_addresses(vec![circuit_addr]);
+                    .add_known_local_public_addresses(vec![circuit_addr.with(Protocol::P2p(local_peer_id))]);
                 info!(target: LOG_TARGET, "🌍️ Peer {peer_id} is a relay. Listening (id={id:?}) for circuit connections");
                 let Some(relay_mut) = self.relays.selected_relay_mut() else {
                     // unreachable