diff --git a/.github/workflows/base_node_binaries.json b/.github/workflows/base_node_binaries.json index 6c2a8bcac2..c636da8df7 100644 --- a/.github/workflows/base_node_binaries.json +++ b/.github/workflows/base_node_binaries.json @@ -2,7 +2,7 @@ { "name": "linux-x86_64", "runs-on": "ubuntu-20.04", - "rust": "nightly-2023-12-12", + "rust": "nightly-2024-02-04", "target": "x86_64-unknown-linux-gnu", "cross": false }, diff --git a/.github/workflows/base_node_binaries.yml b/.github/workflows/base_node_binaries.yml index afa35b7181..a54bf213d3 100644 --- a/.github/workflows/base_node_binaries.yml +++ b/.github/workflows/base_node_binaries.yml @@ -26,7 +26,7 @@ env: TBN_FEATURES: "default, safe" TBN_LIBRARIES: "minotari_mining_helper_ffi" TARI_NETWORK_DIR: testnet - toolchain: nightly-2023-12-12 + toolchain: nightly-2024-02-04 matrix-json-file: ".github/workflows/base_node_binaries.json" CARGO_HTTP_MULTIPLEXING: false CARGO_UNSTABLE_SPARSE_REGISTRY: true @@ -130,7 +130,9 @@ jobs: run: | echo "VBRANCH=${{ github.ref_name }}" >> $GITHUB_ENV echo "VSHA_SHORT=$(git rev-parse --short HEAD)" >> $GITHUB_ENV - TARI_VERSION=$(awk -F ' = ' '$1 ~ /version/ { gsub(/["]/, "", $2); printf("%s",$2) }' "$GITHUB_WORKSPACE/applications/minotari_node/Cargo.toml") + TARI_VERSION=$(awk -F ' = ' '$1 ~ /^version/ \ + { gsub(/["]/, "", $2); printf("%s",$2) }' \ + "$GITHUB_WORKSPACE/applications/minotari_node/Cargo.toml") echo "TARI_VERSION=${TARI_VERSION}" >> $GITHUB_ENV echo "TARI_VERSION=${TARI_VERSION}" >> $GITHUB_OUTPUT if [[ "${{ matrix.builds.features }}" == "" ]]; then diff --git a/.github/workflows/build_dockers.yml b/.github/workflows/build_dockers.yml index dd176540c7..001881331b 100644 --- a/.github/workflows/build_dockers.yml +++ b/.github/workflows/build_dockers.yml @@ -48,7 +48,7 @@ name: Build docker images - xmrig env: - toolchain_default: nightly-2023-12-12 + toolchain_default: nightly-2024-02-04 concurrency: # 
https://docs.github.com/en/actions/examples/using-concurrency-expressions-and-a-test-matrix diff --git a/.github/workflows/build_dockers_workflow.yml b/.github/workflows/build_dockers_workflow.yml index 46b954c70a..c24e98e779 100644 --- a/.github/workflows/build_dockers_workflow.yml +++ b/.github/workflows/build_dockers_workflow.yml @@ -14,7 +14,7 @@ name: Build docker images - workflow_call/on-demand toolchain: type: string description: 'Rust toolchain' - default: nightly-2023-12-12 + default: nightly-2024-02-04 arch: type: string default: x86-64 @@ -134,10 +134,9 @@ jobs: if [ -z "${{ inputs.version }}" ] ; then echo "Get tari version" TARI_SOURCE_ROOT="tari/" - VAPP=$(awk -F ' = ' \ - '$1 ~ /version/ { gsub(/["]/, "", $2); printf("%s",$2) }' \ + VAPP=$(awk -F ' = ' '$1 ~ /^version/ \ + { gsub(/["]/, "", $2); printf("%s",$2) }' \ "${TARI_SOURCE_ROOT}/applications/minotari_node/Cargo.toml") - VBRANCH=$(git --git-dir ${TARI_SOURCE_ROOT}/.git branch --show-current) VSHA_SHORT=$(git --git-dir ${TARI_SOURCE_ROOT}/.git rev-parse --short HEAD) VERSION="v${VAPP}_${VBRANCH}_$(date -u '+%Y%m%d')_${VSHA_SHORT}" diff --git a/.github/workflows/build_libffis.yml b/.github/workflows/build_libffis.yml index edbcfcc8b5..dcb47def4a 100644 --- a/.github/workflows/build_libffis.yml +++ b/.github/workflows/build_libffis.yml @@ -50,6 +50,7 @@ jobs: ## build only single target image # matrix_selection=$( jq -c '.[] | select( ."runs-on" == "ubuntu-latest" )' ${{ env.matrix-json-file }} ) # matrix_selection=$( jq -c '.[] | select( ."target" == "x86_64-linux-android" )' ${{ env.matrix-json-file }} ) + # matrix_selection=$( jq -c '.[] | select( ."target" | contains("android") )' ${{ env.matrix-json-file }} ) # ## buid select target images - build_enabled matrix_selection=$( jq -c '.[] | select( ."build_enabled" != false )' ${{ env.matrix-json-file }} ) @@ -225,9 +226,12 @@ jobs: path: ${{ runner.temp }}/lib${{ matrix.libffis }}-${{ env.TARGET_PLATFORM }}-${{ env.TARGET_ARCH }}${{ 
env.TARGET_SIM }} ios_assemble: - name: Assemble iOS multiArch for ${{ matrix.libffis }}" + name: Assemble iOS multiArch for ${{ matrix.libffis }} + # Disable iOS Assembly workflow + #if: ${{ false }} + #continue-on-error: true - # Limits to only iOS builds + # Limits to only iOS builds? runs-on: macos-latest needs: [matrix-prep, builds] @@ -353,9 +357,11 @@ jobs: create_release: name: Create release for ffi libraries + if: ${{ startsWith(github.ref, 'refs/tags/v') }} + runs-on: ubuntu-latest needs: [matrix-prep, builds, ios_assemble] - if: ${{ startsWith(github.ref, 'refs/tags/v') }} + steps: - name: Download all ffi libraries uses: actions/download-artifact@v4 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08515cf7c8..166f8d432c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,7 +11,7 @@ name: CI merge_group: env: - toolchain: nightly-2023-12-12 + toolchain: nightly-2024-02-04 CARGO_HTTP_MULTIPLEXING: false CARGO_TERM_COLOR: always CARGO_UNSTABLE_SPARSE_REGISTRY: true diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index fd3a740fb2..39ad2e4b91 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -11,7 +11,7 @@ name: Source Coverage - ci-coverage-* env: - toolchain: nightly-2023-12-12 + toolchain: nightly-2024-02-04 concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 6c15cf1ea3..57ff669092 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -27,7 +27,7 @@ name: Integration tests type: string env: - toolchain: nightly-2023-12-12 + toolchain: nightly-2024-02-04 concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/Cargo.lock b/Cargo.lock index 1524bbe225..d985569569 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -54,13 +54,15 @@ dependencies = [ [[package]] name = "ahash" -version = 
"0.7.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ + "cfg-if", "getrandom", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -209,15 +211,6 @@ dependencies = [ "syn 2.0.38", ] -[[package]] -name = "atomic-polyfill" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" -dependencies = [ - "critical-section", -] - [[package]] name = "atty" version = "0.2.14" @@ -892,11 +885,12 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "config" -version = "0.13.3" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" dependencies = [ "async-trait", + "convert_case 0.6.0", "json5", "lazy_static", "nom", @@ -905,7 +899,7 @@ dependencies = [ "rust-ini", "serde", "serde_json", - "toml 0.5.11", + "toml 0.8.8", "yaml-rust", ] @@ -964,12 +958,41 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", 
+] + [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.3" @@ -1234,6 +1257,29 @@ dependencies = [ "typenum", ] +[[package]] +name = "cssparser" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b3df4f93e5fbbe73ec01ec8d3f68bba73107993a5b1e7519273c32db9b0d5be" +dependencies = [ + "cssparser-macros", + "dtoa-short", + "itoa", + "phf 0.11.2", + "smallvec", +] + +[[package]] +name = "cssparser-macros" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331" +dependencies = [ + "quote", + "syn 2.0.38", +] + [[package]] name = "csv" version = "1.3.0" @@ -1491,7 +1537,7 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", + "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", @@ -1629,9 +1675,12 @@ dependencies = [ [[package]] name = "dlv-list" -version = "0.3.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] [[package]] name = "doc-comment" @@ -1645,6 +1694,21 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"669a445ee724c5c69b1b06fe0b63e70a1c84bc9bb7d9696cd4f4e3ec45050408" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + +[[package]] +name = "dtoa-short" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbaceec3c6e4211c79e7b1800fb9680527106beb2f9c51904a3210c03a448c74" +dependencies = [ + "dtoa", +] + [[package]] name = "ecdsa" version = "0.16.8" @@ -1682,6 +1746,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ego-tree" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a68a4904193147e0a8dec3314640e6db742afd5f6e634f428a6af230d9b3591" + [[package]] name = "either" version = "1.9.0" @@ -1939,6 +2009,16 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] +name = "futf" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df420e2e84819663797d1ec6544b13c5be84629e7bb00dc960d6917db2987843" +dependencies = [ + "mac", + "new_debug_unreachable", +] + [[package]] name = "futures" version = "0.1.31" @@ -2052,6 +2132,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -2063,6 +2152,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + [[package]] name = "getrandom" version = "0.2.10" @@ 
-2157,9 +2255,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes 1.5.0", "fnv", @@ -2185,9 +2283,12 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash", -] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" @@ -2319,6 +2420,20 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "html5ever" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bea68cab48b8459f17cf1c944c67ddc572d272d9f2b274140f223ecb1da4a3b7" +dependencies = [ + "log", + "mac", + "markup5ever", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "http" version = "0.2.9" @@ -2950,6 +3065,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "mac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" + +[[package]] +name = "markup5ever" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2629bb1404f3d34c2e921f21fd34ba00b206124c81f65c50b43b6aaefeb016" +dependencies = [ + "log", + "phf 0.10.1", + "phf_codegen", + "string_cache", + "string_cache_codegen", + "tendril", +] + [[package]] name = "matchers" version = "0.1.0" @@ -3215,12 +3350,14 @@ dependencies = [ "hyper", "jsonrpc", "log", + "markup5ever", "minotari_app_grpc", "minotari_app_utilities", 
"minotari_node_grpc_client", "minotari_wallet_grpc_client", "monero", "reqwest", + "scraper", "serde", "serde_json", "tari_common", @@ -3327,6 +3464,7 @@ dependencies = [ "tari_core", "tari_crypto", "tari_features", + "tari_key_manager", "tari_libtor", "tari_metrics", "tari_p2p", @@ -3567,6 +3705,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "newtype-ops" version = "0.1.4" @@ -3761,12 +3905,12 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" dependencies = [ - "atomic-polyfill", "critical-section", + "portable-atomic", ] [[package]] @@ -3846,12 +3990,12 @@ dependencies = [ [[package]] name = "ordered-multimap" -version = "0.4.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" dependencies = [ "dlv-list", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -4150,6 +4294,86 @@ dependencies = [ "zeroize", ] +[[package]] +name = "phf" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259" +dependencies = [ + "phf_shared 0.10.0", +] + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared 0.11.2", +] + 
+[[package]] +name = "phf_codegen" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb1c3a8bc4dd4e5cfce29b44ffc14bedd2ee294559a294e2a4d4c9e9a6a13cd" +dependencies = [ + "phf_generator 0.10.0", + "phf_shared 0.10.0", +] + +[[package]] +name = "phf_generator" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" +dependencies = [ + "phf_shared 0.10.0", + "rand", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator 0.11.2", + "phf_shared 0.11.2", + "proc-macro2", + "quote", + "syn 2.0.38", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "0.4.30" @@ -4286,6 +4510,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + [[package]] name = "powerfmt" version = "0.2.0" @@ -4298,6 +4528,12 @@ version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + [[package]] name = "prettyplease" version = "0.1.25" @@ -4774,13 +5010,14 @@ dependencies = [ [[package]] name = "ron" -version = "0.7.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64 0.21.5", + "bitflags 2.4.1", "serde", + "serde_derive", ] [[package]] @@ -4815,9 +5052,9 @@ dependencies = [ [[package]] name = "rust-ini" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" dependencies = [ "cfg-if", "ordered-multimap", @@ -4990,6 +5227,22 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scraper" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b80b33679ff7a0ea53d37f3b39de77ea0c75b12c5805ac43ec0c33b3051af1b" +dependencies = [ + "ahash", + "cssparser", + "ego-tree", + "getopts", + "html5ever", + "once_cell", + "selectors", + "tendril", +] + [[package]] name = "sct" version = "0.7.1" @@ -5049,6 +5302,25 @@ dependencies = [ "libc", ] +[[package]] +name = "selectors" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4eb30575f3638fc8f6815f448d50cb1a2e255b0897985c8c59f4d37b72a07b06" +dependencies = [ + "bitflags 2.4.1", + "cssparser", + "derive_more", + "fxhash", + "log", + "new_debug_unreachable", + "phf 0.10.1", + "phf_codegen", + "precomputed-hash", + "servo_arc", + "smallvec", +] + [[package]] name = "semver" version = "1.0.20" @@ -5141,6 +5413,15 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "servo_arc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d036d71a959e00c77a63538b90a6c2390969f9772b096ea837205c6bd0491a44" +dependencies = [ + "stable_deref_trait", +] + [[package]] name = "sha1" version = "0.6.0" @@ -5248,6 +5529,12 @@ dependencies = [ "rand_core", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -5360,6 +5647,12 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "stack-buf" version = "0.1.6" @@ -5378,6 +5671,32 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot 0.12.1", + "phf_shared 0.10.0", + "precomputed-hash", + "serde", +] + +[[package]] +name = "string_cache_codegen" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" +dependencies = [ + "phf_generator 0.10.0", + "phf_shared 0.10.0", + "proc-macro2", + "quote", +] + [[package]] name = "strip-ansi-escapes" version = "0.2.0" @@ -6222,6 +6541,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "tendril" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d24a120c5fc464a3458240ee02c299ebcb9d67b5249c8848b09d639dca8d7bb0" +dependencies = [ + "futf", + "mac", + "utf-8", +] + [[package]] name = "termcolor" version = "1.3.0" @@ -6487,6 +6817,18 @@ dependencies = [ "toml_edit 0.19.15", ] +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", +] + [[package]] name = "toml_datetime" version = "0.6.5" @@ -6520,6 +6862,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.8.3" @@ -6942,6 +7297,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.1" @@ -7457,6 +7818,26 @@ dependencies = [ "time", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] 
+name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "zeroize" version = "1.6.0" diff --git a/Cross.toml b/Cross.toml index 1dbd46498b..3b82aed0c3 100644 --- a/Cross.toml +++ b/Cross.toml @@ -14,6 +14,8 @@ passthrough = [ "FEATURES", "ROARING_ARCH", "TARI_NETWORK", + "TARI_TARGET_NETWORK", + "TARI_NETWORK_DIR", ] # Don't forget export: diff --git a/applications/minotari_app_grpc/proto/base_node.proto b/applications/minotari_app_grpc/proto/base_node.proto index bb0d6032eb..2c3b9cfd3f 100644 --- a/applications/minotari_app_grpc/proto/base_node.proto +++ b/applications/minotari_app_grpc/proto/base_node.proto @@ -57,6 +57,10 @@ service BaseNode { rpc GetNewBlockTemplate(NewBlockTemplateRequest) returns (NewBlockTemplateResponse); // Construct a new block from a provided template rpc GetNewBlock(NewBlockTemplate) returns (GetNewBlockResult); + // Construct a new block from a provided template and the provided coinbases + rpc GetNewBlockWithCoinbases(GetNewBlockWithCoinbasesRequest) returns (GetNewBlockResult); + // Construct a new block template with the provided coinbases + rpc GetNewBlockTemplateWithCoinbases(GetNewBlockTemplateWithCoinbasesRequest) returns (GetNewBlockResult); // Construct a new block and header blob from a provided template rpc GetNewBlockBlob(NewBlockTemplate) returns (GetNewBlockBlobResult); // Submit a new block for propagation @@ -182,6 +186,28 @@ message NewBlockTemplateRequest{ uint64 max_weight = 2; } +/// request type of GetNewBlockTemplateWithCoinbases +message GetNewBlockTemplateWithCoinbasesRequest{ + PowAlgo algo = 1; + //This field should be moved to optional once optional keyword is standard + uint64 max_weight = 2; + repeated NewBlockCoinbase coinbases = 3; +} + +/// request type of GetNewBlockWithCoinbasesRequest +message GetNewBlockWithCoinbasesRequest{ + 
NewBlockTemplate new_template = 1; + repeated NewBlockCoinbase coinbases = 2; +} + +message NewBlockCoinbase{ + string address = 1; + uint64 value = 2; + bool stealth_payment= 3; + bool revealed_value_proof= 4; + bytes coinbase_extra =5; +} + // Network difficulty response message NetworkDifficultyResponse { uint64 difficulty = 1; @@ -348,6 +374,7 @@ message GetNewBlockResult{ Block block = 2; bytes merge_mining_hash =3; bytes tari_unique_id =4; + MinerData miner_data = 5; } // This is the message that is returned for a miner after it asks for a new block. diff --git a/applications/minotari_console_wallet/Cargo.toml b/applications/minotari_console_wallet/Cargo.toml index e22e709f68..f4c66935a5 100644 --- a/applications/minotari_console_wallet/Cargo.toml +++ b/applications/minotari_console_wallet/Cargo.toml @@ -33,7 +33,7 @@ tokio = { version = "1.36", features = ["signal"] } blake2 = "0.10" chrono = { version = "0.4.19", default-features = false } clap = { version = "3.2", features = ["derive", "env"] } -config = "0.13.0" +config = "0.14.0" crossterm = { version = "0.25.0" } digest = "0.10" futures = { version = "^0.3.16", default-features = false, features = [ diff --git a/applications/minotari_console_wallet/src/ui/mod.rs b/applications/minotari_console_wallet/src/ui/mod.rs index 93f14443a5..5413c7ee22 100644 --- a/applications/minotari_console_wallet/src/ui/mod.rs +++ b/applications/minotari_console_wallet/src/ui/mod.rs @@ -108,6 +108,7 @@ fn crossterm_loop(mut app: App>) -> Result<(), ExitErro error!(target: LOG_TARGET, "Error drawing interface. 
{}", e); ExitCode::InterfaceError })?; + #[allow(clippy::blocks_in_conditions)] match events.next().map_err(|e| { error!(target: LOG_TARGET, "Error reading input event: {}", e); ExitCode::InterfaceError diff --git a/applications/minotari_console_wallet/src/ui/state/app_state.rs b/applications/minotari_console_wallet/src/ui/state/app_state.rs index d860286e1e..1ad02af691 100644 --- a/applications/minotari_console_wallet/src/ui/state/app_state.rs +++ b/applications/minotari_console_wallet/src/ui/state/app_state.rs @@ -127,7 +127,7 @@ impl AppState { wallet_connectivity, balance_enquiry_debouncer: BalanceEnquiryDebouncer::new( inner, - Duration::from_secs(5), + wallet_config.balance_enquiry_cooldown_period, output_manager_service, ), wallet_config, diff --git a/applications/minotari_ledger_wallet/Cargo.lock b/applications/minotari_ledger_wallet/Cargo.lock index 75933241a0..17fa7e0271 100644 --- a/applications/minotari_ledger_wallet/Cargo.lock +++ b/applications/minotari_ledger_wallet/Cargo.lock @@ -2,17 +2,6 @@ # It is not intended for manual editing. 
version = 3 -[[package]] -name = "ahash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", -] - [[package]] name = "atomic-polyfill" version = "1.0.3" @@ -48,47 +37,11 @@ dependencies = [ [[package]] name = "borsh" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" -dependencies = [ - "borsh-derive", - "hashbrown", -] - -[[package]] -name = "borsh-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.10.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +checksum = "4e6cb63579996213e822f6d828b0a47e1d23b1e8708f52d18a6b1af5670dd207" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "cfg_aliases", ] [[package]] @@ -106,6 +59,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "color_quant" version = "1.1.0" @@ -201,15 +160,6 @@ dependencies = [ "weezl", ] -[[package]] -name = "hashbrown" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash", -] - [[package]] name = "heck" version = "0.4.1" @@ -316,15 +266,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - [[package]] name = "proc-macro2" version = "1.0.66" @@ -374,12 +315,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" -[[package]] -name = "serde" -version = "1.0.183" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c" - [[package]] name = "sha3" version = "0.10.8" @@ -487,15 +422,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - [[package]] name = "typenum" version = "1.16.0" diff --git a/applications/minotari_ledger_wallet/Cargo.toml b/applications/minotari_ledger_wallet/Cargo.toml index a3bcd7d58b..adbaeec326 100644 --- a/applications/minotari_ledger_wallet/Cargo.toml +++ b/applications/minotari_ledger_wallet/Cargo.toml @@ -16,7 +16,7 @@ tari_crypto = { version = "0.18", default-features = false } 
embedded-alloc = "0.5.0" critical-section = { version = "1.1.1" } digest = { version = "0.10", default-features = false } -borsh = { version = "0.10", default-features = false } +borsh = { version = "1.0", default-features = false } blake2 = { version = "0.10", default-features = false } [profile.release] diff --git a/applications/minotari_ledger_wallet/rust-toolchain.toml b/applications/minotari_ledger_wallet/rust-toolchain.toml index 837c756fcb..84247f8a05 100644 --- a/applications/minotari_ledger_wallet/rust-toolchain.toml +++ b/applications/minotari_ledger_wallet/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "nightly-2023-05-17" \ No newline at end of file +channel = "nightly-2024-02-04" \ No newline at end of file diff --git a/applications/minotari_ledger_wallet/rustfmt.toml b/applications/minotari_ledger_wallet/rustfmt.toml index 3bc22cf400..13868eb0c1 100644 --- a/applications/minotari_ledger_wallet/rustfmt.toml +++ b/applications/minotari_ledger_wallet/rustfmt.toml @@ -10,7 +10,7 @@ imports_layout = "HorizontalVertical" imports_granularity = "Crate" match_block_trailing_comma = true max_width = 120 -newline_style = "Native" +newline_style = "Auto" normalize_comments = true overflow_delimited_expr = true reorder_imports = true diff --git a/applications/minotari_merge_mining_proxy/Cargo.toml b/applications/minotari_merge_mining_proxy/Cargo.toml index b7018d09d6..1d4f618deb 100644 --- a/applications/minotari_merge_mining_proxy/Cargo.toml +++ b/applications/minotari_merge_mining_proxy/Cargo.toml @@ -28,7 +28,7 @@ borsh = "1.2" bytes = "1.1" chrono = { version = "0.4.19", default-features = false } clap = { version = "3.2", features = ["derive", "env"] } -config = { version = "0.13.0" } +config = { version = "0.14.0" } crossterm = { version = "0.25.0" } futures = { version = "^0.3.16", features = ["async-await"] } hex = "0.4.2" @@ -44,6 +44,10 @@ tokio = { version = "1.36", features = ["macros"] } tonic = "0.8.3" tracing = "0.1" url = "2.1.1" 
+scraper = "0.19.0" [build-dependencies] tari_features = { path = "../../common/tari_features", version = "1.0.0-dan.5" } + +[dev-dependencies] +markup5ever = "0.11.0" \ No newline at end of file diff --git a/applications/minotari_merge_mining_proxy/log4rs_sample.yml b/applications/minotari_merge_mining_proxy/log4rs_sample.yml index 9b56009301..7cbebfcec3 100644 --- a/applications/minotari_merge_mining_proxy/log4rs_sample.yml +++ b/applications/minotari_merge_mining_proxy/log4rs_sample.yml @@ -55,4 +55,15 @@ loggers: - stdout - proxy additive: false - \ No newline at end of file + html5ever: + level: error + appenders: + - stdout + - proxy + additive: false + selectors: + level: error + appenders: + - stdout + - proxy + additive: false diff --git a/applications/minotari_merge_mining_proxy/src/block_template_data.rs b/applications/minotari_merge_mining_proxy/src/block_template_data.rs index e0c2ecc52d..fa90b116e2 100644 --- a/applications/minotari_merge_mining_proxy/src/block_template_data.rs +++ b/applications/minotari_merge_mining_proxy/src/block_template_data.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -//! Provides methods for for building template data and storing them with timestamps. +//! Provides methods for building template data and storing them with timestamps. 
use std::{collections::HashMap, convert::TryFrom, sync::Arc}; diff --git a/applications/minotari_merge_mining_proxy/src/config.rs b/applications/minotari_merge_mining_proxy/src/config.rs index d9418a5554..0f8db4b9a4 100644 --- a/applications/minotari_merge_mining_proxy/src/config.rs +++ b/applications/minotari_merge_mining_proxy/src/config.rs @@ -32,12 +32,23 @@ use tari_common_types::tari_address::TariAddress; use tari_comms::multiaddr::Multiaddr; use tari_core::transactions::transaction_components::RangeProofType; +// The default Monero fail URL for mainnet +const MONERO_FAIL_MAINNET_URL: &str = "https://monero.fail/?chain=monero&network=mainnet&all=true"; + #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] #[allow(clippy::struct_excessive_bools)] pub struct MergeMiningProxyConfig { override_from: Option, - /// URL to monerod + /// Use dynamic monerod URL obtained from the official Monero website (https://monero.fail/) + pub use_dynamic_fail_data: bool, + /// The monero fail URL to get the monerod URLs from - must be pointing to the official Monero website.
+ /// Valid alternatives are: + /// - mainnet: 'https://monero.fail/?chain=monero&network=mainnet&all=true' + /// - stagenet: `https://monero.fail/?chain=monero&network=stagenet&all=true` + /// - testnet: `https://monero.fail/?chain=monero&network=testnet&all=true` + pub monero_fail_url: String, + /// URL to monerod (you can add your own server here or use public nodes from https://monero.fail/) pub monerod_url: StringList, /// Username for curl pub monerod_username: String, @@ -89,6 +100,8 @@ impl Default for MergeMiningProxyConfig { fn default() -> Self { Self { override_from: None, + use_dynamic_fail_data: true, + monero_fail_url: MONERO_FAIL_MAINNET_URL.into(), monerod_url: StringList::default(), monerod_username: String::new(), monerod_password: String::new(), diff --git a/applications/minotari_merge_mining_proxy/src/error.rs b/applications/minotari_merge_mining_proxy/src/error.rs index fe58c43f3a..afd2d4b56a 100644 --- a/applications/minotari_merge_mining_proxy/src/error.rs +++ b/applications/minotari_merge_mining_proxy/src/error.rs @@ -77,6 +77,8 @@ pub enum MmProxyError { }, #[error("HTTP error: {0}")] HttpError(#[from] hyper::http::Error), + #[error("HTML parse error: {0}")] + HtmlParseError(String), #[error("Could not parse URL: {0}")] UrlParseError(#[from] url::ParseError), #[error("Bincode error: {0}")] diff --git a/applications/minotari_merge_mining_proxy/src/lib.rs b/applications/minotari_merge_mining_proxy/src/lib.rs index 9674852c82..95e975c245 100644 --- a/applications/minotari_merge_mining_proxy/src/lib.rs +++ b/applications/minotari_merge_mining_proxy/src/lib.rs @@ -33,6 +33,7 @@ mod error; mod proxy; mod run_merge_miner; use run_merge_miner::start_merge_miner; +mod monero_fail; pub async fn merge_miner(cli: Cli) -> Result<(), anyhow::Error> { start_merge_miner(cli).await diff --git a/applications/minotari_merge_mining_proxy/src/main.rs b/applications/minotari_merge_mining_proxy/src/main.rs index da4bf48e13..30c1943a74 100644 --- 
a/applications/minotari_merge_mining_proxy/src/main.rs +++ b/applications/minotari_merge_mining_proxy/src/main.rs @@ -28,6 +28,7 @@ mod cli; mod common; mod config; mod error; +mod monero_fail; mod proxy; mod run_merge_miner; diff --git a/applications/minotari_merge_mining_proxy/src/monero_fail.rs b/applications/minotari_merge_mining_proxy/src/monero_fail.rs new file mode 100644 index 0000000000..1065ce1c80 --- /dev/null +++ b/applications/minotari_merge_mining_proxy/src/monero_fail.rs @@ -0,0 +1,372 @@ +// Copyright 2020, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::time::Duration; + +use log::*; +use scraper::{Html, Selector}; +use tokio::time::timeout; +use url::Url; + +use crate::error::MmProxyError; + +const LOG_TARGET: &str = "minotari_mm_proxy::monero_detect"; + +/// Monero public server information +#[derive(Debug)] +pub struct MonerodEntry { + /// The type of address + pub address_type: String, + /// The URL of the server + pub url: String, + /// The monero blockchain height reported by the server + pub height: u64, + /// Whether the server is currently up + pub up: bool, + /// Whether the server is web compatible + pub web_compatible: bool, + /// The network the server is on (mainnet, stagenet, testnet) + pub network: String, + /// Time since the server was checked + pub last_checked: String, + /// The history of the server being up + pub up_history: Vec, + /// Response time + pub response_time: Option, +} + +/// Get the latest monerod public nodes (by scraping the HTML from the monero.fail website) that are +/// currently up and have a full history of being up all the time.
+#[allow(clippy::too_many_lines)] +pub async fn get_monerod_info( + number_of_entries: usize, + connection_test_timeout: Duration, + monero_fail_url: &str, +) -> Result, MmProxyError> { + let document = get_monerod_html(monero_fail_url).await?; + + // The HTML table definition and an example entry looks like this: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + + // Define selectors for table elements + let row_selector = + Selector::parse("tr.js-sort-table").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let type_selector = + Selector::parse("td:nth-child(1)").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let url_selector = + Selector::parse("td:nth-child(2) .nodeURL").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let height_selector = + Selector::parse("td:nth-child(3)").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let up_selector = Selector::parse("td:nth-child(4) .dot.glowing-green, td:nth-child(4) .dot.glowing-red") + .map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let web_compatible_selector = Selector::parse("td:nth-child(5) img.filter-green, td:nth-child(5) img.filter-red") + .map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let network_selector = + Selector::parse("td:nth-child(6)").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let last_checked_selector = + Selector::parse("td:nth-child(7)").map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + let history_selector = Selector::parse("td:nth-child(8) .dot.glowing-green, td:nth-child(8) .dot.glowing-red") + .map_err(|e| MmProxyError::HtmlParseError(format!("{}", e)))?; + + let mut entries = Vec::new(); + + // Iterate over table rows and extract data + for row in document.select(&row_selector) { + let address_type = match row.select(&type_selector).next() { + Some(val) => 
val.text().collect::().trim().to_string(), + None => return Err(MmProxyError::HtmlParseError("address type".to_string())), + }; + + let url = match row.select(&url_selector).next() { + Some(val) => val.text().collect::().trim().to_string(), + None => return Err(MmProxyError::HtmlParseError("url".to_string())), + }; + + let height = match row.select(&height_selector).next() { + Some(val) => val.text().collect::().trim().parse::().unwrap_or_default(), + None => return Err(MmProxyError::HtmlParseError("height".to_string())), + }; + + let mut up = false; + let iter = row.select(&up_selector); + for item in iter { + let class = item.value().attr("class").unwrap_or(""); + if class.contains("dot glowing-green") { + up = true; + break; + } + } + + let mut web_compatible = false; + let iter = row.select(&web_compatible_selector); + for item in iter { + let class = item.value().attr("class").unwrap_or(""); + if class.contains("filter-green") { + web_compatible = true; + break; + } + } + + let network = match row.select(&network_selector).next() { + Some(val) => val.text().collect::().trim().to_string(), + None => return Err(MmProxyError::HtmlParseError("network".to_string())), + }; + + let last_checked = match row.select(&last_checked_selector).next() { + Some(val) => val.text().collect::().trim().to_string(), + None => return Err(MmProxyError::HtmlParseError("last checked".to_string())), + }; + + let mut up_history = Vec::new(); + let iter = row.select(&history_selector); + for item in iter { + let class = item.value().attr("class").unwrap_or(""); + up_history.push(class.contains("dot glowing-green")); + } + + let entry = MonerodEntry { + address_type: address_type.to_lowercase(), + url, + height, + up, + web_compatible, + network: network.to_lowercase(), + last_checked, + up_history, + response_time: None, + }; + entries.push(entry); + } + + // Only retain nodes that are currently up and has a full history of being up all the time + let max_history_length = 
entries.iter().map(|entry| entry.up_history.len()).max().unwrap_or(0); + entries.retain(|entry| { + entry.up && entry.up_history.iter().filter(|&&v| v).collect::>().len() == max_history_length + }); + // Only retain non-tor and non-i2p nodes + entries.retain(|entry| entry.address_type != *"tor" && entry.address_type != *"i2p"); + // Give preference to nodes with the best height + entries.sort_by(|a, b| b.height.cmp(&a.height)); + // Determine connection times - use slightly more nodes than requested + entries.truncate(number_of_entries + 10); + for entry in &mut entries { + let uri = format!("{}/getheight", entry.url).parse::()?; + let start = std::time::Instant::now(); + if (timeout(connection_test_timeout, reqwest::get(uri.clone())).await).is_ok() { + entry.response_time = Some(start.elapsed()); + debug!(target: LOG_TARGET, "Response time '{:.2?}' for Monerod server at: {}", entry.response_time, uri.as_str()); + } else { + debug!(target: LOG_TARGET, "Response time 'n/a' for Monerod server at: {}, timed out", uri.as_str()); + } + } + // Sort by response time + entries.sort_by(|a, b| { + a.response_time + .unwrap_or_else(|| Duration::from_secs(100)) + .cmp(&b.response_time.unwrap_or_else(|| Duration::from_secs(100))) + }); + // Truncate to the requested number of entries + entries.truncate(number_of_entries); + + if entries.is_empty() { + return Err(MmProxyError::HtmlParseError( + "No public monero servers available".to_string(), + )); + } + Ok(entries) +} + +async fn get_monerod_html(url: &str) -> Result { + let body = match reqwest::get(url).await { + Ok(resp) => match resp.text().await { + Ok(html) => html, + Err(e) => { + error!("Failed to fetch monerod info: {}", e); + return Err(MmProxyError::MonerodRequestFailed(e)); + }, + }, + Err(e) => { + error!("Failed to fetch monerod info: {}", e); + return Err(MmProxyError::MonerodRequestFailed(e)); + }, + }; + + Ok(Html::parse_document(&body)) +} + +#[cfg(test)] +mod test { + use std::{ops::Deref, time::Duration}; + 
+ use markup5ever::{local_name, namespace_url, ns, QualName}; + use scraper::Html; + + use crate::{ + config::MergeMiningProxyConfig, + monero_fail::{get_monerod_html, get_monerod_info}, + }; + + #[tokio::test] + async fn test_get_monerod_info() { + // Monero mainnet + let config = MergeMiningProxyConfig::default(); + let entries = get_monerod_info(5, Duration::from_secs(2), &config.monero_fail_url) + .await + .unwrap(); + for (i, entry) in entries.iter().enumerate() { + assert!(entry.up && entry.up_history.iter().all(|&v| v)); + if i > 0 { + assert!( + entry.response_time.unwrap_or_else(|| Duration::from_secs(100)) >= + entries[i - 1].response_time.unwrap_or_else(|| Duration::from_secs(100)) + ); + } + println!("{}: {:?}", i, entry); + } + + // Monero stagenet + const MONERO_FAIL_STAGNET_URL: &str = "https://monero.fail/?chain=monero&network=stagenet&all=true"; + let entries = get_monerod_info(5, Duration::from_secs(2), MONERO_FAIL_STAGNET_URL) + .await + .unwrap(); + for (i, entry) in entries.iter().enumerate() { + assert!(entry.up && entry.up_history.iter().all(|&v| v)); + if i > 0 { + assert!( + entry.response_time.unwrap_or_else(|| Duration::from_secs(100)) >= + entries[i - 1].response_time.unwrap_or_else(|| Duration::from_secs(100)) + ); + } + println!("{}: {:?}", i, entry); + } + + // Monero testnet + const MONERO_FAIL_TESTNET_URL: &str = "https://monero.fail/?chain=monero&network=testnet&all=true"; + let entries = get_monerod_info(5, Duration::from_secs(2), MONERO_FAIL_TESTNET_URL) + .await + .unwrap(); + for (i, entry) in entries.iter().enumerate() { + assert!(entry.up && entry.up_history.iter().all(|&v| v)); + if i > 0 { + assert!( + entry.response_time.unwrap_or_else(|| Duration::from_secs(100)) >= + entries[i - 1].response_time.unwrap_or_else(|| Duration::from_secs(100)) + ); + } + println!("{}: {:?}", i, entry); + } + } + + #[tokio::test] + async fn test_table_structure() { + let config = MergeMiningProxyConfig::default(); + let html_content = 
get_monerod_html(&config.monero_fail_url).await.unwrap(); + + let table_structure = extract_table_structure(&html_content); + + let expected_structure = vec![ + "Type", + "URL", + "Height", + "Up", + "Web", + "Compatible", + "Network", + "Last Checked", + "History", + ]; + + // Compare the actual and expected table structures + assert_eq!(table_structure, expected_structure); + } + + // Function to extract table structure from the document + fn extract_table_structure(html_document: &Html) -> Vec<&str> { + let mut table_structure = Vec::new(); + if let Some(table) = html_document.tree.root().descendants().find(|n| { + n.value().is_element() && + n.value().as_element().unwrap().name == QualName::new(None, ns!(html), local_name!("table")) + }) { + if let Some(thead) = table.descendants().find(|n| { + n.value().is_element() && + n.value().as_element().unwrap().name == QualName::new(None, ns!(html), local_name!("thead")) + }) { + if let Some(tr) = thead.descendants().find(|n| { + n.value().is_element() && + n.value().as_element().unwrap().name == QualName::new(None, ns!(html), local_name!("tr")) + }) { + for th in tr.descendants().filter(|n| { + n.value().is_element() && + n.value().as_element().unwrap().name == QualName::new(None, ns!(html), local_name!("th")) + }) { + for child in th.children() { + if let Some(text) = child.value().as_text() { + table_structure.push(text.deref().trim()); + } + } + } + } + } + } + table_structure + } +} diff --git a/applications/minotari_merge_mining_proxy/src/proxy.rs b/applications/minotari_merge_mining_proxy/src/proxy.rs index 6e55ca1c36..ccbcb7c6de 100644 --- a/applications/minotari_merge_mining_proxy/src/proxy.rs +++ b/applications/minotari_merge_mining_proxy/src/proxy.rs @@ -31,7 +31,7 @@ use std::{ RwLock, }, task::{Context, Poll}, - time::Instant, + time::{Duration, Instant}, }; use borsh::BorshSerialize; @@ -49,6 +49,7 @@ use tari_core::{ proof_of_work::{monero_rx, monero_rx::FixedByteArray, randomx_difficulty, 
randomx_factory::RandomXFactory}, }; use tari_utilities::hex::Hex; +use tokio::time::timeout; use tracing::{debug, error, info, instrument, trace, warn}; use crate::{ @@ -653,7 +654,8 @@ impl InnerService { for next_url in iter { let uri = format!("{}{}", next_url, uri.path()).parse::()?; - match reqwest::get(uri.clone()).await { + debug!(target: LOG_TARGET, "Trying to connect to Monerod server at: {}", uri.as_str()); + match timeout(Duration::from_secs(10), reqwest::get(uri.clone())).await { Ok(_) => { let mut lock = self.current_monerod_server.write().expect("Write lock should not fail"); *lock = Some(next_url.to_string()); @@ -662,11 +664,11 @@ impl InnerService { .write() .expect("Write lock should not fail"); *lock = Some(next_url.to_string()); - info!(target: LOG_TARGET, "Monerod server available: {:?}", uri.clone()); + info!(target: LOG_TARGET, "Monerod server available: {}", uri.as_str()); return Ok(uri); }, Err(_) => { - warn!(target: LOG_TARGET, "Monerod server unavailable: {:?}", uri); + warn!(target: LOG_TARGET, "Monerod server unavailable: {}", uri.as_str()); }, } } diff --git a/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs b/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs index ec076c94b6..34ceb2a856 100644 --- a/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs +++ b/applications/minotari_merge_mining_proxy/src/run_merge_miner.rs @@ -34,7 +34,7 @@ use minotari_app_utilities::parse_miner_input::{ }; use minotari_node_grpc_client::{grpc, grpc::base_node_client::BaseNodeClient}; use minotari_wallet_grpc_client::ClientAuthenticationInterceptor; -use tari_common::{load_configuration, DefaultConfigLoader}; +use tari_common::{configuration::StringList, load_configuration, DefaultConfigLoader}; use tari_comms::utils::multiaddr::multiaddr_to_socketaddr; use tari_core::proof_of_work::randomx_factory::RandomXFactory; use tokio::time::Duration; @@ -44,6 +44,7 @@ use crate::{ 
block_template_data::BlockTemplateRepository, config::MergeMiningProxyConfig, error::MmProxyError, + monero_fail::get_monerod_info, proxy::MergeMiningProxyService, Cli, }; @@ -55,6 +56,12 @@ pub async fn start_merge_miner(cli: Cli) -> Result<(), anyhow::Error> { let cfg = load_configuration(&config_path, true, cli.non_interactive_mode, &cli)?; let mut config = MergeMiningProxyConfig::load_from(&cfg)?; config.set_base_path(cli.common.get_base_path()); + if config.use_dynamic_fail_data { + let entries = get_monerod_info(15, Duration::from_secs(5), &config.monero_fail_url).await?; + if !entries.is_empty() { + config.monerod_url = StringList::from(entries.into_iter().map(|entry| entry.url).collect::>()); + } + } info!(target: LOG_TARGET, "Configuration: {:?}", config); let client = reqwest::Client::builder() diff --git a/applications/minotari_miner/Cargo.toml b/applications/minotari_miner/Cargo.toml index 7d73112b88..c24778a823 100644 --- a/applications/minotari_miner/Cargo.toml +++ b/applications/minotari_miner/Cargo.toml @@ -41,7 +41,7 @@ tonic = { version = "0.8.3", features = ["tls", "tls-roots" ] } [dev-dependencies] prost-types = "0.11.9" chrono = { version = "0.4.19", default-features = false } -config = "0.13.0" +config = "0.14.0" [package.metadata.cargo-machete] ignored = [ diff --git a/applications/minotari_node/Cargo.toml b/applications/minotari_node/Cargo.toml index 36c410c232..67e3352423 100644 --- a/applications/minotari_node/Cargo.toml +++ b/applications/minotari_node/Cargo.toml @@ -22,6 +22,7 @@ tari_storage = { path = "../../infrastructure/storage" } tari_service_framework = { path = "../../base_layer/service_framework" } tari_shutdown = { path = "../../infrastructure/shutdown" } tari_utilities = { version = "0.7" } +tari_key_manager = { path = "../../base_layer/key_manager", features = ["key_manager_service"] } anyhow = "1.0.53" async-trait = "0.1.52" @@ -30,7 +31,7 @@ borsh = "1.2" chrono = { version = "0.4.19", default-features = false } clap = { 
version = "3.2", features = ["derive", "env"] } console-subscriber = "0.1.8" -config = { version = "0.13.0" } +config = { version = "0.14.0" } crossterm = { version = "0.25.0", features = ["event-stream"] } derive_more = "0.99.17" either = "1.6.1" diff --git a/applications/minotari_node/src/bootstrap.rs b/applications/minotari_node/src/bootstrap.rs index dc76aa290f..9bf7ea1125 100644 --- a/applications/minotari_node/src/bootstrap.rs +++ b/applications/minotari_node/src/bootstrap.rs @@ -20,7 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{cmp, str::FromStr, sync::Arc, time::Duration}; +use std::{ + cmp, + str::FromStr, + sync::{Arc, RwLock}, + time::Duration, +}; use log::*; use minotari_app_utilities::{consts, identity_management, identity_management::load_from_json}; @@ -52,6 +57,7 @@ use tari_core::{ mempool::{service::MempoolHandle, Mempool, MempoolServiceInitializer, MempoolSyncInitializer}, proof_of_work::randomx_factory::RandomXFactory, transactions::CryptoFactories, + OutputSmt, }; use tari_p2p::{ auto_update::SoftwareUpdaterService, @@ -81,6 +87,7 @@ pub struct BaseNodeBootstrapper<'a, B> { pub factories: CryptoFactories, pub randomx_factory: RandomXFactory, pub interrupt_signal: ShutdownSignal, + pub smt: Arc>, } impl BaseNodeBootstrapper<'_, B> diff --git a/applications/minotari_node/src/builder.rs b/applications/minotari_node/src/builder.rs index 53b0947442..09d27335c0 100644 --- a/applications/minotari_node/src/builder.rs +++ b/applications/minotari_node/src/builder.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::sync::Arc; +use std::sync::{Arc, RwLock}; use log::*; use tari_common::{ @@ -42,6 +42,7 @@ use tari_core::{ transaction::TransactionFullValidator, DifficultyCalculator, }, + OutputSmt, }; use tari_p2p::{auto_update::SoftwareUpdaterHandle, services::liveness::LivenessHandle}; use tari_service_framework::ServiceHandles; @@ -210,6 +211,7 @@ async fn build_node_context( let factories = CryptoFactories::default(); let randomx_factory = RandomXFactory::new(app_config.base_node.max_randomx_vms); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), randomx_factory.clone()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), true), HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()), @@ -226,6 +228,7 @@ async fn build_node_context( validators, app_config.base_node.storage, difficulty_calculator, + smt.clone(), ) .map_err(|err| { if let ChainStorageError::DatabaseResyncRequired(reason) = err { @@ -262,6 +265,7 @@ async fn build_node_context( factories: factories.clone(), randomx_factory, interrupt_signal: interrupt_signal.clone(), + smt, } .bootstrap() .await?; diff --git a/applications/minotari_node/src/config.rs b/applications/minotari_node/src/config.rs index 140c41832b..b24861ac56 100644 --- a/applications/minotari_node/src/config.rs +++ b/applications/minotari_node/src/config.rs @@ -236,6 +236,8 @@ pub enum GrpcMethod { GetNetworkDifficulty, GetNewBlockTemplate, GetNewBlock, + GetNewBlockWithCoinbases, + GetNewBlockTemplateWithCoinbases, GetNewBlockBlob, SubmitBlock, SubmitBlockBlob, diff --git a/applications/minotari_node/src/grpc/base_node_grpc_server.rs b/applications/minotari_node/src/grpc/base_node_grpc_server.rs index e6ec4685d3..10ee2c4d86 100644 --- a/applications/minotari_node/src/grpc/base_node_grpc_server.rs +++ b/applications/minotari_node/src/grpc/base_node_grpc_server.rs @@ -34,7 +34,10 @@ use minotari_app_grpc::{ tari_rpc::{CalcType, 
Sorting}, }; use minotari_app_utilities::consts; -use tari_common_types::types::{Commitment, FixedHash, PublicKey, Signature}; +use tari_common_types::{ + tari_address::TariAddress, + types::{Commitment, FixedHash, PublicKey, Signature}, +}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ base_node::{ @@ -49,8 +52,25 @@ use tari_core::{ iterators::NonOverlappingIntegerPairIter, mempool::{service::LocalMempoolService, TxStorageResponse}, proof_of_work::PowAlgorithm, - transactions::transaction_components::Transaction, + transactions::{ + generate_coinbase_with_wallet_output, + key_manager::{ + create_memory_db_key_manager, + TariKeyId, + TransactionKeyManagerBranch, + TransactionKeyManagerInterface, + TxoStage, + }, + transaction_components::{ + KernelBuilder, + RangeProofType, + Transaction, + TransactionKernel, + TransactionKernelVersion, + }, + }, }; +use tari_key_manager::key_manager_service::KeyManagerInterface; use tari_p2p::{auto_update::SoftwareUpdaterHandle, services::liveness::LivenessHandle}; use tari_utilities::{hex::Hex, message_format::MessageFormat, ByteArray}; use tokio::task; @@ -123,6 +143,8 @@ impl BaseNodeGrpcServer { let mining_method = [ GrpcMethod::GetVersion, GrpcMethod::GetNewBlockTemplate, + GrpcMethod::GetNewBlockWithCoinbases, + GrpcMethod::GetNewBlockTemplateWithCoinbases, GrpcMethod::GetNewBlock, GrpcMethod::GetNewBlockBlob, GrpcMethod::SubmitBlock, @@ -148,19 +170,18 @@ impl BaseNodeGrpcServer { if self.config.second_layer_grpc_enabled && second_layer_methods.contains(&grpc_method) { return true; } - self.config.grpc_server_allow_methods.contains(&grpc_method) } - fn check_method_enabled(&self, method: GrpcMethod) -> Option, Status>> { + fn check_method_enabled(&self, method: GrpcMethod) -> Result<(), Status> { if !self.is_method_enabled(method) { warn!(target: LOG_TARGET, "`{}` method called but it is not allowed. 
Allow it in the config file or start the node with a different set of CLI options", method); - return Some(Err(Status::permission_denied(format!( + return Err(Status::permission_denied(format!( "`{}` method not made available", method - )))); + ))); } - None + Ok(()) } } @@ -201,9 +222,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetNetworkDifficulty) { - return value; - } + self.check_method_enabled(GrpcMethod::GetNetworkDifficulty)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!( @@ -319,9 +338,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetMempoolTransactions) { - return value; - } + self.check_method_enabled(GrpcMethod::GetMempoolTransactions)?; let report_error_flag = self.report_error_flag(); let _request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetMempoolTransactions",); @@ -382,9 +399,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::ListHeaders) { - return value; - } + self.check_method_enabled(GrpcMethod::ListHeaders)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!( @@ -555,9 +570,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetNewBlockTemplate) { - return value; - } + self.check_method_enabled(GrpcMethod::GetNewBlockTemplate)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block template"); @@ 
-619,9 +632,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetNewBlock) { - return value; - } + self.check_method_enabled(GrpcMethod::GetNewBlock)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block"); @@ -631,9 +642,446 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Status::invalid_argument(format!("Malformed block template provided: {}", s)), ) })?; + let algo = block_template.header.pow.pow_algo; + + let mut handler = self.node_service.clone(); + + let new_block = match handler.get_new_block(block_template).await { + Ok(b) => b, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument(message), + )); + }, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { + let status = Status::with_details( + tonic::Code::FailedPrecondition, + msg, + Bytes::from_static(b"CannotCalculateNonTipMmr"), + ); + return Err(obscure_error_if_true(report_error_flag, status)); + }, + Err(e) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::internal(e.to_string()), + )) + }, + }; + let fees = new_block.body.get_total_fee().map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Invalid fees in block".to_string()), + ) + })?; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? 
+ .hash() + .to_vec(); + // construct response + let block_hash = new_block.hash().to_vec(); + let mining_hash = match new_block.header.pow.pow_algo { + PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), + PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), + }; + let block: Option = Some( + new_block + .try_into() + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, + ); + let new_template = handler.get_new_block_template(algo, 0).await.map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: fees.as_u64(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + + let response = tari_rpc::GetNewBlockResult { + block_hash, + block, + merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, + miner_data: Some(miner_data), + }; + debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); + Ok(Response::new(response)) + } + + #[allow(clippy::too_many_lines)] + async fn get_new_block_template_with_coinbases( + &self, + request: Request, + ) -> Result, Status> { + if !self.is_method_enabled(GrpcMethod::GetNewBlockTemplateWithCoinbases) { + return Err(Status::permission_denied( + "`GetNewBlockTemplateWithCoinbases` method not made available", + )); + } + debug!(target: LOG_TARGET, "Incoming GRPC request for get new block template with coinbases"); + let report_error_flag = self.report_error_flag(); + let request = request.into_inner(); + let algo = request + .algo + .map(|algo| u64::try_from(algo.pow_algo)) + .ok_or_else(|| obscure_error_if_true(report_error_flag, Status::invalid_argument("PoW algo not provided")))? 
+ .map_err(|e| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Invalid PoW algo '{}'", e)), + ) + })?; + + let algo = PowAlgorithm::try_from(algo).map_err(|e| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Invalid PoW algo '{}'", e)), + ) + })?; + + let mut handler = self.node_service.clone(); + + let mut new_template = handler + .get_new_block_template(algo, request.max_weight) + .await + .map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: new_template.total_fees.into(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + + let mut coinbases: Vec = request.coinbases; + + // Let's validate the coinbase amounts. + let reward = self + .consensus_rules + .calculate_coinbase_and_fees(new_template.header.height, new_template.body.kernels()) + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::internal("Could not calculate the amount of fees in the block".to_string()), + ) + })? + .as_u64(); + let mut total_shares = 0u64; + for coinbase in &coinbases { + total_shares += coinbase.value; + } + let mut remainder = reward - ((reward / total_shares) * total_shares); + for coinbase in &mut coinbases { + coinbase.value *= reward / total_shares; + if remainder > 0 { + coinbase.value += 1; + remainder -= 1; + } + } + + let key_manager = create_memory_db_key_manager(); + let height = new_template.header.height; + // The script key is not used in the Diffie-Hellman protocol, so we assign default.
+ let script_key_id = TariKeyId::default(); + + let mut total_excess = Commitment::default(); + let mut total_nonce = PublicKey::default(); + let mut private_keys = Vec::new(); + let mut kernel_message = [0; 32]; + let mut last_kernel = Default::default(); + for coinbase in coinbases { + let address = TariAddress::from_hex(&coinbase.address) + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + let range_proof_type = if coinbase.revealed_value_proof { + RangeProofType::RevealedValue + } else { + RangeProofType::BulletProofPlus + }; + let (_, coinbase_output, coinbase_kernel, wallet_output) = generate_coinbase_with_wallet_output( + 0.into(), + coinbase.value.into(), + height, + &coinbase.coinbase_extra, + &key_manager, + &script_key_id, + &address, + coinbase.stealth_payment, + self.consensus_rules.consensus_constants(height), + range_proof_type, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + new_template.body.add_output(coinbase_output); + let (new_private_nonce, pub_nonce) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + total_nonce = &total_nonce + &pub_nonce; + total_excess = &total_excess + &coinbase_kernel.excess; + private_keys.push((wallet_output.spending_key_id, new_private_nonce)); + kernel_message = TransactionKernel::build_kernel_signature_message( + &TransactionKernelVersion::get_current_version(), + coinbase_kernel.fee, + coinbase_kernel.lock_height, + &coinbase_kernel.features, + &None, + ); + last_kernel = coinbase_kernel; + } + let mut kernel_signature = Signature::default(); + for (spending_key_id, nonce) in private_keys { + kernel_signature = &kernel_signature + + &key_manager + .get_partial_txo_kernel_signature( + &spending_key_id, + &nonce, + &total_nonce, + total_excess.as_public_key(), + 
&TransactionKernelVersion::get_current_version(), + &kernel_message, + &last_kernel.features, + TxoStage::Output, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + } + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(last_kernel.features) + .with_lock_height(last_kernel.lock_height) + .with_excess(&total_excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + new_template.body.add_kernel(kernel_new); + new_template.body.sort(); + + let new_block = match handler.get_new_block(new_template).await { + Ok(b) => b, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument(message), + )); + }, + Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { + let status = Status::with_details( + tonic::Code::FailedPrecondition, + msg, + Bytes::from_static(b"CannotCalculateNonTipMmr"), + ); + return Err(obscure_error_if_true(report_error_flag, status)); + }, + Err(e) => { + return Err(obscure_error_if_true( + report_error_flag, + Status::internal(e.to_string()), + )) + }, + }; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? 
+ .hash() + .to_vec(); + // construct response + let block_hash = new_block.hash().to_vec(); + let mining_hash = match new_block.header.pow.pow_algo { + PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), + PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), + }; + let block: Option = Some( + new_block + .try_into() + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, + ); + + let response = tari_rpc::GetNewBlockResult { + block_hash, + block, + merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, + miner_data: Some(miner_data), + }; + debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); + Ok(Response::new(response)) + } + + #[allow(clippy::too_many_lines)] + async fn get_new_block_with_coinbases( + &self, + request: Request, + ) -> Result, Status> { + if !self.is_method_enabled(GrpcMethod::GetNewBlockWithCoinbases) { + return Err(Status::permission_denied( + "`GetNewBlockWithCoinbasesRequest` method not made available", + )); + } + let report_error_flag = self.report_error_flag(); + let request = request.into_inner(); + debug!(target: LOG_TARGET, "Incoming GRPC request for get new block with coinbases"); + let mut block_template: NewBlockTemplate = request + .new_template + .ok_or(obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Malformed block template provided".to_string()), + ))? 
+ .try_into() + .map_err(|s| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument(format!("Malformed block template provided: {}", s)), + ) + })?; + let coinbases: Vec = request.coinbases; let mut handler = self.node_service.clone(); + // let validate the coinbase amounts; + let reward = self + .consensus_rules + .calculate_coinbase_and_fees(block_template.header.height, block_template.body.kernels()) + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::internal("Could not calculate the amount of fees in the block".to_string()), + ) + })?; + let mut amount = 0u64; + for coinbase in &coinbases { + amount += coinbase.value; + } + + if amount != reward.as_u64() { + return Err(obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Malformed coinbase amounts".to_string()), + )); + } + let key_manager = create_memory_db_key_manager(); + let height = block_template.header.height; + // The script key is not used in the Diffie-Hellmann protocol, so we assign default. 
+ let script_key_id = TariKeyId::default(); + + let mut total_excess = Commitment::default(); + let mut total_nonce = PublicKey::default(); + let mut private_keys = Vec::new(); + let mut kernel_message = [0; 32]; + let mut last_kernel = Default::default(); + for coinbase in coinbases { + let address = TariAddress::from_hex(&coinbase.address) + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + let range_proof_type = if coinbase.revealed_value_proof { + RangeProofType::RevealedValue + } else { + RangeProofType::BulletProofPlus + }; + let (_, coinbase_output, coinbase_kernel, wallet_output) = generate_coinbase_with_wallet_output( + 0.into(), + coinbase.value.into(), + height, + &coinbase.coinbase_extra, + &key_manager, + &script_key_id, + &address, + coinbase.stealth_payment, + self.consensus_rules.consensus_constants(height), + range_proof_type, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + block_template.body.add_output(coinbase_output); + let (new_private_nonce, pub_nonce) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + total_nonce = &total_nonce + &pub_nonce; + total_excess = &total_excess + &coinbase_kernel.excess; + private_keys.push((wallet_output.spending_key_id, new_private_nonce)); + kernel_message = TransactionKernel::build_kernel_signature_message( + &TransactionKernelVersion::get_current_version(), + coinbase_kernel.fee, + coinbase_kernel.lock_height, + &coinbase_kernel.features, + &None, + ); + last_kernel = coinbase_kernel; + } + let mut kernel_signature = Signature::default(); + for (spending_key_id, nonce) in private_keys { + kernel_signature = &kernel_signature + + &key_manager + .get_partial_txo_kernel_signature( + &spending_key_id, + &nonce, + &total_nonce, + total_excess.as_public_key(), + 
&TransactionKernelVersion::get_current_version(), + &kernel_message, + &last_kernel.features, + TxoStage::Output, + ) + .await + .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e.to_string())))?; + } + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(last_kernel.features) + .with_lock_height(last_kernel.lock_height) + .with_excess(&total_excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + block_template.body.add_kernel(kernel_new); + block_template.body.sort(); + let new_block = match handler.get_new_block(block_template).await { Ok(b) => b, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { @@ -657,6 +1105,13 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { )) }, }; + let fees = new_block.body.get_total_fee().map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Invalid fees in block".to_string()), + ) + })?; + let algo = new_block.header.pow.pow_algo; let gen_hash = handler .get_header(0) .await @@ -686,11 +1141,30 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { .map_err(|e| obscure_error_if_true(report_error_flag, Status::internal(e)))?, ); + let new_template = handler.get_new_block_template(algo, 0).await.map_err(|e| { + warn!( + target: LOG_TARGET, + "Could not get new block template: {}", + e.to_string() + ); + obscure_error_if_true(report_error_flag, Status::internal(e.to_string())) + })?; + + let pow = algo as i32; + + let miner_data = tari_rpc::MinerData { + reward: new_template.reward.into(), + target_difficulty: new_template.target_difficulty.as_u64(), + total_fees: fees.as_u64(), + algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), + }; + let response = tari_rpc::GetNewBlockResult { block_hash, block, merge_mining_hash: mining_hash, tari_unique_id: gen_hash, + miner_data: Some(miner_data), }; debug!(target: LOG_TARGET, "Sending GetNewBlock response to 
client"); Ok(Response::new(response)) @@ -700,9 +1174,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetNewBlockBlob) { - return value; - } + self.check_method_enabled(GrpcMethod::GetNewBlockBlob)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block blob"); @@ -785,9 +1257,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::SubmitBlock) { - return value; - } + self.check_method_enabled(GrpcMethod::SubmitBlock)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); let block = Block::try_from(request).map_err(|e| { @@ -821,9 +1291,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::SubmitBlockBlob) { - return value; - } + self.check_method_enabled(GrpcMethod::SubmitBlockBlob)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Received block blob from miner: {:?}", request); let request = request.into_inner(); @@ -864,9 +1332,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::SubmitTransaction) { - return value; - } + self.check_method_enabled(GrpcMethod::SubmitTransaction)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); let txn: Transaction = request @@ -918,9 +1384,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = 
self.check_method_enabled(GrpcMethod::TransactionState) { - return value; - } + self.check_method_enabled(GrpcMethod::TransactionState)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); let excess_sig: Signature = request @@ -1007,9 +1471,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetPeers) { - return value; - } + self.check_method_enabled(GrpcMethod::GetPeers)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for get all peers"); @@ -1042,9 +1504,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetBlocks) { - return value; - } + self.check_method_enabled(GrpcMethod::GetBlocks)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!( @@ -1113,9 +1573,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetTipInfo) { - return value; - } + self.check_method_enabled(GrpcMethod::GetTipInfo)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for BN tip data"); @@ -1143,9 +1601,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::SearchKernels) { - return value; - } + self.check_method_enabled(GrpcMethod::SearchKernels)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for SearchKernels"); let request = request.into_inner(); @@ -1201,9 +1657,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, 
request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::SearchUtxos) { - return value; - } + self.check_method_enabled(GrpcMethod::SearchUtxos)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for SearchUtxos"); let request = request.into_inner(); @@ -1259,9 +1713,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::FetchMatchingUtxos) { - return value; - } + self.check_method_enabled(GrpcMethod::FetchMatchingUtxos)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for FetchMatchingUtxos"); let request = request.into_inner(); @@ -1330,9 +1782,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetBlockTiming) { - return value; - } + self.check_method_enabled(GrpcMethod::GetBlockTiming)?; let report_error_flag = self.report_error_flag(); let request = request.into_inner(); debug!( @@ -1382,9 +1832,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetConstants) { - return value; - } + self.check_method_enabled(GrpcMethod::GetConstants)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetConstants",); debug!(target: LOG_TARGET, "Sending GetConstants response to client"); @@ -1410,9 +1858,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetBlockSize) { - return value; - } + self.check_method_enabled(GrpcMethod::GetBlockSize)?; let report_error_flag = 
self.report_error_flag(); get_block_group( self.node_service.clone(), @@ -1427,9 +1873,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetBlockFees) { - return value; - } + self.check_method_enabled(GrpcMethod::GetBlockFees)?; let report_error_flag = self.report_error_flag(); get_block_group( self.node_service.clone(), @@ -1441,9 +1885,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { } async fn get_version(&self, _request: Request) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetVersion) { - return value; - } + self.check_method_enabled(GrpcMethod::GetVersion)?; Ok(Response::new(consts::APP_VERSION.to_string().into())) } @@ -1451,9 +1893,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::CheckForUpdates) { - return value; - } + self.check_method_enabled(GrpcMethod::CheckForUpdates)?; let mut resp = tari_rpc::SoftwareUpdate::default(); if let Some(ref update) = *self.software_updater.update_notifier().borrow() { @@ -1470,9 +1910,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetTokensInCirculation) { - return value; - } + self.check_method_enabled(GrpcMethod::GetTokensInCirculation)?; let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetTokensInCirculation",); let request = request.into_inner(); @@ -1530,9 +1968,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetSyncProgress) { - return value; - } + 
self.check_method_enabled(GrpcMethod::GetSyncProgress)?; let state = self .state_machine_handle .get_status_info_watch() @@ -1577,9 +2013,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetSyncInfo) { - return value; - } + self.check_method_enabled(GrpcMethod::GetSyncInfo)?; debug!(target: LOG_TARGET, "Incoming GRPC request for BN sync data"); let response = self .state_machine_handle @@ -1607,9 +2041,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetHeaderByHash) { - return value; - } + self.check_method_enabled(GrpcMethod::GetHeaderByHash)?; let report_error_flag = self.report_error_flag(); let tari_rpc::GetHeaderByHashRequest { hash } = request.into_inner(); let mut node_service = self.node_service.clone(); @@ -1649,9 +2081,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { } async fn identify(&self, _: Request) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::Identify) { - return value; - } + self.check_method_enabled(GrpcMethod::Identify)?; let identity = self.comms.node_identity_ref(); Ok(Response::new(tari_rpc::NodeIdentity { public_key: identity.public_key().to_vec(), @@ -1664,9 +2094,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetNetworkStatus) { - return value; - } + self.check_method_enabled(GrpcMethod::GetNetworkStatus)?; let report_error_flag = self.report_error_flag(); let status = self .comms @@ -1702,9 +2130,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::ListConnectedPeers) { - 
return value; - } + self.check_method_enabled(GrpcMethod::ListConnectedPeers)?; let report_error_flag = self.report_error_flag(); let mut connectivity = self.comms.connectivity(); let peer_manager = self.comms.peer_manager(); @@ -1740,9 +2166,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, _: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetMempoolStats) { - return value; - } + self.check_method_enabled(GrpcMethod::GetMempoolStats)?; let report_error_flag = self.report_error_flag(); let mut mempool_handle = self.mempool_service.clone(); @@ -1764,9 +2188,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetShardKey) { - return value; - } + self.check_method_enabled(GrpcMethod::GetShardKey)?; let request = request.into_inner(); let report_error_flag = self.report_error_flag(); let mut handler = self.node_service.clone(); @@ -1794,9 +2216,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetActiveValidatorNodes) { - return value; - } + self.check_method_enabled(GrpcMethod::GetActiveValidatorNodes)?; let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetActiveValidatorNodes"); @@ -1853,9 +2273,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetTemplateRegistrations) { - return value; - } + self.check_method_enabled(GrpcMethod::GetTemplateRegistrations)?; let request = request.into_inner(); let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetTemplateRegistrations"); @@ -1937,9 +2355,7 @@ impl 
tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { &self, request: Request, ) -> Result, Status> { - if let Some(value) = self.check_method_enabled(GrpcMethod::GetSideChainUtxos) { - return value; - } + self.check_method_enabled(GrpcMethod::GetSideChainUtxos)?; let request = request.into_inner(); let report_error_flag = self.report_error_flag(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetTemplateRegistrations"); diff --git a/applications/minotari_node/src/recovery.rs b/applications/minotari_node/src/recovery.rs index a346e83865..86678675d8 100644 --- a/applications/minotari_node/src/recovery.rs +++ b/applications/minotari_node/src/recovery.rs @@ -25,7 +25,7 @@ use std::{ env::temp_dir, fs, io::{self, Write}, - sync::Arc, + sync::{Arc, RwLock}, }; use anyhow::anyhow; @@ -53,6 +53,7 @@ use tari_core::{ mocks::MockValidator, DifficultyCalculator, }, + OutputSmt, }; use crate::{BaseNodeConfig, DatabaseType}; @@ -97,6 +98,7 @@ pub async fn run_recovery(node_config: &BaseNodeConfig) -> Result<(), anyhow::Er let factories = CryptoFactories::default(); let randomx_factory = RandomXFactory::new(node_config.max_randomx_vms); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), randomx_factory); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), true), HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()), @@ -114,6 +116,7 @@ pub async fn run_recovery(node_config: &BaseNodeConfig) -> Result<(), anyhow::Er validators, node_config.storage, difficulty_calculator, + smt, )?; do_recovery(db.into(), temp_db).await?; @@ -142,12 +145,14 @@ async fn do_recovery( MockValidator::new(true), MockValidator::new(true), ); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let source_database = BlockchainDatabase::new( source_backend, rules.clone(), validators, BlockchainDatabaseConfig::default(), DifficultyCalculator::new(rules, Default::default()), + 
smt, )?; let max_height = source_database .get_chain_metadata() diff --git a/base_layer/chat_ffi/chat.h b/base_layer/chat_ffi/chat.h index 654edf1c04..89893c9b37 100644 --- a/base_layer/chat_ffi/chat.h +++ b/base_layer/chat_ffi/chat.h @@ -276,14 +276,14 @@ struct ChatByteVector *read_confirmation_message_id(struct Confirmation *confirm int *error_out); /** - * Get a c_longlong timestamp for the Confirmation + * Get a c_ulonglong timestamp for the Confirmation * * ## Arguments * `confirmation` - A pointer to the Confirmation * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_longlong` - A uint representation of time since epoch + * `c_ulonglong` - A uint representation of time since epoch. May return 0 on error * * # Safety * The ```confirmation``` When done with the Confirmation it should be destroyed @@ -329,7 +329,7 @@ void add_chat_contact(struct ChatClient *client, struct TariAddress *address, in * `error_out` - Pointer to an int which will be modified * * ## Returns - * `status` - Returns an int representing of the online status + * `status` - Returns an c_uchar representing of the online status * Online = 1, * Offline = 2, * NeverSeen = 3, @@ -338,7 +338,9 @@ void add_chat_contact(struct ChatClient *client, struct TariAddress *address, in * # Safety * The ```address``` should be destroyed after use */ -int check_online_status(struct ChatClient *client, struct TariAddress *receiver, int *error_out); +unsigned char check_online_status(struct ChatClient *client, + struct TariAddress *receiver, + int *error_out); /** * Returns a pointer to a TariAddress @@ -385,14 +387,14 @@ unsigned char read_liveness_data_online_status(struct ContactsLivenessData *live * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_longlong` - A c_longlong rep of an enum for a contacts online status. 
May return -1 if an error - * occurs, or 0 if the contact has never been seen + * `c_ulonglong` - A c_longlong rep of timestamp for a contacts last seen status. + * 0 if the contact has never been seen or an error occurs. * * ## Safety * `liveness` should be destroyed eventually */ -long long read_liveness_data_last_seen(struct ContactsLivenessData *liveness, - int *error_out); +unsigned long long read_liveness_data_last_seen(struct ContactsLivenessData *liveness, + int *error_out); /** * Frees memory for a ContactsLivenessData @@ -431,13 +433,13 @@ struct ConversationalistsVector *get_conversationalists(struct ChatClient *clien * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_int` - The length of the vector. May return -1 if something goes wrong + * `c_uint` - The length of the vector. May return 0 if something goes wrong * * ## Safety * `conversationalists` should be destroyed eventually */ -int conversationalists_vector_len(struct ConversationalistsVector *conversationalists, - int *error_out); +unsigned int conversationalists_vector_len(struct ConversationalistsVector *conversationalists, + int *error_out); /** * Reads the ConversationalistsVector and returns a pointer to a TariAddress at a given position @@ -548,13 +550,12 @@ struct MessageMetadata *chat_metadata_get_at(struct Message *message, * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_longlong` - The length of the metadata vector for a Message. May return -1 if something goes wrong + * `c_uint` - The length of the metadata vector for a Message. 
May return 0 if something goes wrong * * ## Safety * `message` should be destroyed eventually */ -long long chat_message_metadata_len(struct Message *message, - int *error_out); +unsigned int chat_message_metadata_len(struct Message *message, int *error_out); /** * Returns a pointer to a ChatByteVector representing the data of the Message @@ -589,21 +590,21 @@ struct ChatByteVector *read_chat_message_body(struct Message *message, int *erro struct TariAddress *read_chat_message_address(struct Message *message, int *error_out); /** - * Returns a c_int representation of the Direction enum + * Returns a c_uchar representation of the Direction enum * * ## Arguments * `message` - A pointer to a Message * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_int` - A c_int rep of the direction enum. May return -1 if anything goes wrong + * `c_uchar` - A c_uchar rep of the direction enum. May return 0 if anything goes wrong * 0 => Inbound * 1 => Outbound * * ## Safety * `message` should be destroyed eventually */ -int read_chat_message_direction(struct Message *message, int *error_out); +unsigned char read_chat_message_direction(struct Message *message, int *error_out); /** * Returns a c_ulonglong representation of the stored at timestamp as seconds since epoch @@ -674,9 +675,7 @@ struct ChatByteVector *read_chat_message_id(struct Message *message, int *error_ * * ## Arguments * `message` - A pointer to a message - * `metadata_type` - An c_uchar that maps to MessageMetadataType enum - * '0' -> Reply - * '1' -> TokenRequest + * `key` - A pointer to a byte vector containing bytes for the key field * `data` - A pointer to a byte vector containing bytes for the data field * `error_out` - Pointer to an int which will be modified * @@ -687,7 +686,7 @@ struct ChatByteVector *read_chat_message_id(struct Message *message, int *error_ * `message` should be destroyed eventually */ void add_chat_message_metadata(struct Message *message, - unsigned char 
metadata_type, + struct ChatByteVector *key, struct ChatByteVector *data, int *error_out); @@ -699,14 +698,13 @@ void add_chat_message_metadata(struct Message *message, * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_int` - An int8 that maps to MessageMetadataType enum. May return -1 if something goes wrong - * '0' -> Reply - * '1' -> TokenRequest + * `*mut ChatByteVector` - A ptr to a ChatByteVector * * ## Safety * `msg_metadata` should be destroyed eventually + * the returned `ChatByteVector` should be destroyed eventually */ -int read_chat_metadata_type(struct MessageMetadata *msg_metadata, int *error_out); +struct ChatByteVector *read_chat_metadata_key(struct MessageMetadata *msg_metadata, int *error_out); /** * Returns a ptr to a ByteVector @@ -759,8 +757,8 @@ void destroy_chat_message_metadata(struct MessageMetadata *ptr); */ struct MessageVector *get_chat_messages(struct ChatClient *client, struct TariAddress *address, - int limit, - int page, + unsigned int limit, + unsigned int page, int *error_out); /** @@ -771,12 +769,12 @@ struct MessageVector *get_chat_messages(struct ChatClient *client, * `error_out` - Pointer to an int which will be modified * * ## Returns - * `c_int` - The length of the metadata vector for a Message. May return -1 if something goes wrong + * `c_uint` - The length of the metadata vector for a Message. 
May return 0 if something goes wrong * * ## Safety * `messages` should be destroyed eventually */ -int message_vector_len(struct MessageVector *messages, int *error_out); +unsigned int message_vector_len(struct MessageVector *messages, int *error_out); /** * Reads the MessageVector and returns a Message at a given position diff --git a/base_layer/chat_ffi/src/byte_vector.rs b/base_layer/chat_ffi/src/byte_vector.rs index 9a73239e1e..233840c66d 100644 --- a/base_layer/chat_ffi/src/byte_vector.rs +++ b/base_layer/chat_ffi/src/byte_vector.rs @@ -152,3 +152,17 @@ pub unsafe extern "C" fn chat_byte_vector_get_length(vec: *const ChatByteVector, (*vec).0.len() as c_uint } + +pub(crate) unsafe fn process_vector(vector: *mut ChatByteVector, error_out: *mut c_int) -> Vec { + let data_byte_vector_length = chat_byte_vector_get_length(vector, error_out); + let mut bytes: Vec = Vec::new(); + + if data_byte_vector_length > 0 { + for c in 0..data_byte_vector_length { + let byte = chat_byte_vector_get_at(vector, c as c_uint, error_out); + bytes.push(byte); + } + } + + bytes +} diff --git a/base_layer/chat_ffi/src/confirmation.rs b/base_layer/chat_ffi/src/confirmation.rs index d3ef37f127..7f89fd5a29 100644 --- a/base_layer/chat_ffi/src/confirmation.rs +++ b/base_layer/chat_ffi/src/confirmation.rs @@ -101,18 +101,27 @@ pub unsafe extern "C" fn read_confirmation_message_id( let c = &(*confirmation); let data_bytes = c.message_id.clone(); - let len = u32::try_from(data_bytes.len()).expect("Can't cast from usize"); + + let len = match u32::try_from(data_bytes.len()) { + Ok(l) => l, + Err(e) => { + error = LibChatError::from(InterfaceError::ConversionError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + }; + chat_byte_vector_create(data_bytes.as_ptr(), len as c_uint, error_out) } -/// Get a c_longlong timestamp for the Confirmation +/// Get a c_ulonglong timestamp for the Confirmation /// /// ## Arguments /// `confirmation` - A pointer to the 
Confirmation /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_longlong` - A uint representation of time since epoch +/// `c_ulonglong` - A uint representation of time since epoch. May return 0 on error /// /// # Safety /// The ```confirmation``` When done with the Confirmation it should be destroyed diff --git a/base_layer/chat_ffi/src/contacts.rs b/base_layer/chat_ffi/src/contacts.rs index 333799a35c..90b7972f8f 100644 --- a/base_layer/chat_ffi/src/contacts.rs +++ b/base_layer/chat_ffi/src/contacts.rs @@ -22,7 +22,7 @@ use std::ptr; -use libc::c_int; +use libc::{c_int, c_uchar}; use tari_chat_client::ChatClient as ChatClientTrait; use tari_common_types::tari_address::TariAddress; @@ -74,7 +74,7 @@ pub unsafe extern "C" fn add_chat_contact(client: *mut ChatClient, address: *mut /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `status` - Returns an int representing of the online status +/// `status` - Returns an c_uchar representing of the online status /// Online = 1, /// Offline = 2, /// NeverSeen = 3, @@ -87,7 +87,7 @@ pub unsafe extern "C" fn check_online_status( client: *mut ChatClient, receiver: *mut TariAddress, error_out: *mut c_int, -) -> c_int { +) -> c_uchar { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); @@ -105,7 +105,7 @@ pub unsafe extern "C" fn check_online_status( let result = (*client).runtime.block_on((*client).client.check_online_status(&rec)); match result { - Ok(status) => status.as_u8().into(), + Ok(status) => status.as_u8(), Err(e) => { error = LibChatError::from(InterfaceError::ContactServiceError(e.to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); diff --git a/base_layer/chat_ffi/src/contacts_liveness_data.rs b/base_layer/chat_ffi/src/contacts_liveness_data.rs index ab27f03e35..c1bb9b305a 100644 --- a/base_layer/chat_ffi/src/contacts_liveness_data.rs +++ b/base_layer/chat_ffi/src/contacts_liveness_data.rs @@ -20,9 +20,9 @@ // 
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::ptr; +use std::{convert::TryFrom, ptr}; -use libc::{c_int, c_longlong, c_uchar}; +use libc::{c_int, c_uchar, c_ulonglong}; use tari_common_types::tari_address::TariAddress; use tari_contacts::contacts_service::handle::ContactsLivenessData; @@ -97,8 +97,8 @@ pub unsafe extern "C" fn read_liveness_data_online_status( /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_longlong` - A c_longlong rep of an enum for a contacts online status. May return -1 if an error -/// occurs, or 0 if the contact has never been seen +/// `c_ulonglong` - A c_longlong rep of timestamp for a contacts last seen status. +/// 0 if the contact has never been seen or an error occurs. /// /// ## Safety /// `liveness` should be destroyed eventually @@ -106,7 +106,7 @@ pub unsafe extern "C" fn read_liveness_data_online_status( pub unsafe extern "C" fn read_liveness_data_last_seen( liveness: *mut ContactsLivenessData, error_out: *mut c_int, -) -> c_longlong { +) -> c_ulonglong { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); @@ -117,7 +117,13 @@ pub unsafe extern "C" fn read_liveness_data_last_seen( } match (*liveness).last_ping_pong_received() { - Some(last_seen) => last_seen.timestamp(), + Some(last_seen) => match u64::try_from(last_seen.timestamp()) { + Ok(ls) => ls as c_ulonglong, + Err(_e) => { + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + }, None => 0, } } diff --git a/base_layer/chat_ffi/src/conversationalists.rs b/base_layer/chat_ffi/src/conversationalists.rs index 239d5a0b75..c012956abb 100644 --- a/base_layer/chat_ffi/src/conversationalists.rs +++ b/base_layer/chat_ffi/src/conversationalists.rs @@ -77,7 +77,7 @@ pub unsafe extern "C" fn get_conversationalists( /// `error_out` - Pointer to an int which will be modified /// 
/// ## Returns -/// `c_int` - The length of the vector. May return -1 if something goes wrong +/// `c_uint` - The length of the vector. May return 0 if something goes wrong /// /// ## Safety /// `conversationalists` should be destroyed eventually @@ -85,18 +85,25 @@ pub unsafe extern "C" fn get_conversationalists( pub unsafe extern "C" fn conversationalists_vector_len( conversationalists: *mut ConversationalistsVector, error_out: *mut c_int, -) -> c_int { +) -> c_uint { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if conversationalists.is_null() { error = LibChatError::from(InterfaceError::NullError("conversationalists".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return 0; } let conversationalists = &(*conversationalists); - c_int::try_from(conversationalists.0.len()).unwrap_or(-1) + match c_uint::try_from(conversationalists.0.len()) { + Ok(l) => l, + Err(e) => { + error = LibChatError::from(InterfaceError::ConversionError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } } /// Reads the ConversationalistsVector and returns a pointer to a TariAddress at a given position diff --git a/base_layer/chat_ffi/src/error.rs b/base_layer/chat_ffi/src/error.rs index 48008c4885..69d46ce71b 100644 --- a/base_layer/chat_ffi/src/error.rs +++ b/base_layer/chat_ffi/src/error.rs @@ -36,6 +36,8 @@ pub enum InterfaceError { AllocationError, #[error("An error because the supplied position was out of range")] PositionInvalidError, + #[error("Conversion error: `{0}`")] + ConversionError(String), #[error("The client had an error communication with contact services")] ContactServiceError(String), } @@ -76,6 +78,10 @@ impl From for LibChatError { code: 8, message: format!("{:?}", v), }, + InterfaceError::ConversionError(_) => Self { + code: 9, + message: format!("{:?}", v), + }, } } } diff --git a/base_layer/chat_ffi/src/message.rs b/base_layer/chat_ffi/src/message.rs index c4b6f654bf..9be70ab11b 
100644 --- a/base_layer/chat_ffi/src/message.rs +++ b/base_layer/chat_ffi/src/message.rs @@ -22,7 +22,7 @@ use std::{convert::TryFrom, ffi::CStr, ptr}; -use libc::{c_char, c_int, c_longlong, c_uint, c_ulonglong}; +use libc::{c_char, c_int, c_uchar, c_uint, c_ulonglong}; use tari_chat_client::ChatClient as ChatClientTrait; use tari_common_types::tari_address::TariAddress; use tari_contacts::contacts_service::types::{Message, MessageBuilder, MessageMetadata}; @@ -183,25 +183,30 @@ pub unsafe extern "C" fn chat_metadata_get_at( /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_longlong` - The length of the metadata vector for a Message. May return -1 if something goes wrong +/// `c_uint` - The length of the metadata vector for a Message. May return 0 if something goes wrong /// /// ## Safety /// `message` should be destroyed eventually #[no_mangle] -pub unsafe extern "C" fn chat_message_metadata_len(message: *mut Message, error_out: *mut c_int) -> c_longlong { +pub unsafe extern "C" fn chat_message_metadata_len(message: *mut Message, error_out: *mut c_int) -> c_uint { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if message.is_null() { error = LibChatError::from(InterfaceError::NullError("message".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return 0; } let message = &(*message); - #[allow(clippy::cast_possible_wrap)] - let res = message.metadata.len() as i64; - res + match c_uint::try_from(message.metadata.len()) { + Ok(l) => l, + Err(e) => { + error = LibChatError::from(InterfaceError::ConversionError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } } /// Returns a pointer to a ChatByteVector representing the data of the Message @@ -268,31 +273,38 @@ pub unsafe extern "C" fn read_chat_message_address(message: *mut Message, error_ Box::into_raw(Box::new(address)) } -/// Returns a c_int representation of the Direction enum +/// Returns a 
c_uchar representation of the Direction enum /// /// ## Arguments /// `message` - A pointer to a Message /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_int` - A c_int rep of the direction enum. May return -1 if anything goes wrong +/// `c_uchar` - A c_uchar rep of the direction enum. May return 0 if anything goes wrong /// 0 => Inbound /// 1 => Outbound /// /// ## Safety /// `message` should be destroyed eventually #[no_mangle] -pub unsafe extern "C" fn read_chat_message_direction(message: *mut Message, error_out: *mut c_int) -> c_int { +pub unsafe extern "C" fn read_chat_message_direction(message: *mut Message, error_out: *mut c_int) -> c_uchar { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if message.is_null() { error = LibChatError::from(InterfaceError::NullError("message".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return 0; } - c_int::from((*message).direction.as_byte()) + match c_uchar::try_from((*message).direction.as_byte()) { + Ok(d) => d, + Err(e) => { + error = LibChatError::from(InterfaceError::ConversionError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } } /// Returns a c_ulonglong representation of the stored at timestamp as seconds since epoch diff --git a/base_layer/chat_ffi/src/message_metadata.rs b/base_layer/chat_ffi/src/message_metadata.rs index a3e06492c6..c93d4a6398 100644 --- a/base_layer/chat_ffi/src/message_metadata.rs +++ b/base_layer/chat_ffi/src/message_metadata.rs @@ -22,12 +22,12 @@ use std::{convert::TryFrom, ptr}; -use libc::{c_int, c_uchar, c_uint}; -use tari_contacts::contacts_service::types::{Message, MessageMetadata, MessageMetadataType}; +use libc::{c_int, c_uint}; +use tari_contacts::contacts_service::types::{Message, MessageMetadata}; use tari_utilities::ByteArray; use crate::{ - byte_vector::{chat_byte_vector_create, chat_byte_vector_get_at, chat_byte_vector_get_length, ChatByteVector}, + 
byte_vector::{chat_byte_vector_create, process_vector, ChatByteVector}, error::{InterfaceError, LibChatError}, }; @@ -35,9 +35,7 @@ use crate::{ /// /// ## Arguments /// `message` - A pointer to a message -/// `metadata_type` - An c_uchar that maps to MessageMetadataType enum -/// '0' -> Reply -/// '1' -> TokenRequest +/// `key` - A pointer to a byte vector containing bytes for the key field /// `data` - A pointer to a byte vector containing bytes for the data field /// `error_out` - Pointer to an int which will be modified /// @@ -49,49 +47,26 @@ use crate::{ #[no_mangle] pub unsafe extern "C" fn add_chat_message_metadata( message: *mut Message, - metadata_type: c_uchar, + key: *mut ChatByteVector, data: *mut ChatByteVector, error_out: *mut c_int, ) { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); - if message.is_null() { - error = LibChatError::from(InterfaceError::NullError("message".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return; - } - - let metadata_type = match MessageMetadataType::from_byte(metadata_type) { - Some(t) => t, - None => { - error = LibChatError::from(InterfaceError::InvalidArgument( - "Couldn't convert byte to Metadata type".to_string(), - )) - .code; + for (name, d) in [("key", key), ("data", data)] { + if d.is_null() { + error = LibChatError::from(InterfaceError::NullError(name.to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); return; - }, - }; - - if data.is_null() { - error = LibChatError::from(InterfaceError::NullError("data".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return; - } - - let chat_byte_vector_length = chat_byte_vector_get_length(data, error_out); - let mut bytes: Vec = Vec::new(); - for c in 0..chat_byte_vector_length { - let byte = chat_byte_vector_get_at(data, c as c_uint, error_out); - assert_eq!(error, 0); - bytes.push(byte); + } } let metadata = MessageMetadata { - metadata_type, - data: bytes, + key: process_vector(key, 
error_out), + data: process_vector(data, error_out), }; + (*message).push(metadata); } @@ -102,25 +77,37 @@ pub unsafe extern "C" fn add_chat_message_metadata( /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_int` - An int8 that maps to MessageMetadataType enum. May return -1 if something goes wrong -/// '0' -> Reply -/// '1' -> TokenRequest +/// `*mut ChatByteVector` - A ptr to a ChatByteVector /// /// ## Safety /// `msg_metadata` should be destroyed eventually +/// the returned `ChatByteVector` should be destroyed eventually #[no_mangle] -pub unsafe extern "C" fn read_chat_metadata_type(msg_metadata: *mut MessageMetadata, error_out: *mut c_int) -> c_int { +pub unsafe extern "C" fn read_chat_metadata_key( + msg_metadata: *mut MessageMetadata, + error_out: *mut c_int, +) -> *mut ChatByteVector { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if msg_metadata.is_null() { error = LibChatError::from(InterfaceError::NullError("message".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return ptr::null_mut(); } - let md = &(*msg_metadata); - c_int::from(md.metadata_type.as_byte()) + let data = (*msg_metadata).key.clone(); + let data_bytes = data.as_bytes(); + let len = match c_uint::try_from(data_bytes.len()) { + Ok(num) => num, + Err(_e) => { + error = LibChatError::from(InterfaceError::PositionInvalidError).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + }, + }; + + chat_byte_vector_create(data_bytes.as_ptr(), len, error_out) } /// Returns a ptr to a ByteVector @@ -190,7 +177,12 @@ mod test { use super::*; use crate::{ - byte_vector::{chat_byte_vector_create, chat_byte_vector_destroy}, + byte_vector::{ + chat_byte_vector_create, + chat_byte_vector_destroy, + chat_byte_vector_get_at, + chat_byte_vector_get_length, + }, message::{chat_metadata_get_at, destroy_chat_message}, }; @@ -201,10 +193,15 @@ mod test { let data = "hello".to_string(); let 
data_bytes = data.as_bytes(); - let len = u32::try_from(data.len()).expect("Can't cast from usize"); - let data = unsafe { chat_byte_vector_create(data_bytes.as_ptr(), len as c_uint, error_out) }; + let data_len = u32::try_from(data.len()).expect("Can't cast from usize"); + let data = unsafe { chat_byte_vector_create(data_bytes.as_ptr(), data_len as c_uint, error_out) }; - unsafe { add_chat_message_metadata(message_ptr, 0, data, error_out) } + let key = "gif".to_string(); + let key_bytes = key.as_bytes(); + let key_len = u32::try_from(key.len()).expect("Can't cast from usize"); + let key = unsafe { chat_byte_vector_create(key_bytes.as_ptr(), key_len as c_uint, error_out) }; + + unsafe { add_chat_message_metadata(message_ptr, key, data, error_out) } let message = unsafe { Box::from_raw(message_ptr) }; assert_eq!(message.metadata.len(), 1); @@ -232,27 +229,41 @@ mod test { let data_bytes = data.as_bytes(); let len = u32::try_from(data.len()).expect("Can't cast from usize"); let data = chat_byte_vector_create(data_bytes.as_ptr(), len as c_uint, error_out); - let md_type = 0; - add_chat_message_metadata(message_ptr, md_type, data, error_out); + let key = "gif".to_string(); + let key_bytes = key.as_bytes(); + let len = u32::try_from(key.len()).expect("Can't cast from usize"); + let key = chat_byte_vector_create(key_bytes.as_ptr(), len as c_uint, error_out); + + add_chat_message_metadata(message_ptr, key, data, error_out); let metadata_ptr = chat_metadata_get_at(message_ptr, 0, error_out); - let metadata_type = read_chat_metadata_type(metadata_ptr, error_out); - let metadata_byte_vector = read_chat_metadata_data(metadata_ptr, error_out); + let metadata_key_vector = read_chat_metadata_key(metadata_ptr, error_out); + let metadata_key_vector_len = chat_byte_vector_get_length(metadata_key_vector, error_out); + + let mut metadata_key = vec![]; + + for i in 0..metadata_key_vector_len { + metadata_key.push(chat_byte_vector_get_at(metadata_key_vector, i, error_out)); + } + + let 
metadata_data_vector = read_chat_metadata_data(metadata_ptr, error_out); + let metadata_data_vector_len = chat_byte_vector_get_length(metadata_data_vector, error_out); let mut metadata_data = vec![]; - for i in 0..len { - metadata_data.push(chat_byte_vector_get_at(metadata_byte_vector, i, error_out)); + for i in 0..metadata_data_vector_len { + metadata_data.push(chat_byte_vector_get_at(metadata_data_vector, i, error_out)); } - assert_eq!(metadata_type, i32::from(md_type)); + assert_eq!(metadata_key, key_bytes); assert_eq!(metadata_data, data_bytes); destroy_chat_message_metadata(metadata_ptr); destroy_chat_message(message_ptr); - chat_byte_vector_destroy(metadata_byte_vector); + chat_byte_vector_destroy(metadata_key_vector); + chat_byte_vector_destroy(metadata_data_vector); drop(Box::from_raw(error_out)); } } diff --git a/base_layer/chat_ffi/src/messages.rs b/base_layer/chat_ffi/src/messages.rs index 88fe872179..1e8a87b6d4 100644 --- a/base_layer/chat_ffi/src/messages.rs +++ b/base_layer/chat_ffi/src/messages.rs @@ -25,10 +25,7 @@ use std::{convert::TryFrom, ptr}; use libc::{c_int, c_uint}; use tari_chat_client::ChatClient as ChatClientTrait; use tari_common_types::tari_address::TariAddress; -use tari_contacts::contacts_service::{ - handle::{DEFAULT_MESSAGE_LIMIT, DEFAULT_MESSAGE_PAGE}, - types::Message, -}; +use tari_contacts::contacts_service::types::Message; use crate::{ error::{InterfaceError, LibChatError}, @@ -58,8 +55,8 @@ pub struct MessageVector(pub Vec); pub unsafe extern "C" fn get_chat_messages( client: *mut ChatClient, address: *mut TariAddress, - limit: c_int, - page: c_int, + limit: c_uint, + page: c_uint, error_out: *mut c_int, ) -> *mut MessageVector { let mut error = 0; @@ -75,8 +72,8 @@ pub unsafe extern "C" fn get_chat_messages( ptr::swap(error_out, &mut error as *mut c_int); } - let mlimit = u64::try_from(limit).unwrap_or(DEFAULT_MESSAGE_LIMIT); - let mpage = u64::try_from(page).unwrap_or(DEFAULT_MESSAGE_PAGE); + let mlimit = u64::from(limit); 
+ let mpage = u64::from(page); let result = (*client) .runtime @@ -99,23 +96,30 @@ pub unsafe extern "C" fn get_chat_messages( /// `error_out` - Pointer to an int which will be modified /// /// ## Returns -/// `c_int` - The length of the metadata vector for a Message. May return -1 if something goes wrong +/// `c_uint` - The length of the metadata vector for a Message. May return 0 if something goes wrong /// /// ## Safety /// `messages` should be destroyed eventually #[no_mangle] -pub unsafe extern "C" fn message_vector_len(messages: *mut MessageVector, error_out: *mut c_int) -> c_int { +pub unsafe extern "C" fn message_vector_len(messages: *mut MessageVector, error_out: *mut c_int) -> c_uint { let mut error = 0; ptr::swap(error_out, &mut error as *mut c_int); if messages.is_null() { error = LibChatError::from(InterfaceError::NullError("message".to_string())).code; ptr::swap(error_out, &mut error as *mut c_int); - return -1; + return 0; } let messages = &(*messages); - c_int::try_from(messages.0.len()).unwrap_or(-1) + match c_uint::try_from(messages.0.len()) { + Ok(l) => l, + Err(e) => { + error = LibChatError::from(InterfaceError::ConversionError(e.to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } } /// Reads the MessageVector and returns a Message at a given position diff --git a/base_layer/common_types/src/tx_id.rs b/base_layer/common_types/src/tx_id.rs index 476de9e27d..c59d693b8e 100644 --- a/base_layer/common_types/src/tx_id.rs +++ b/base_layer/common_types/src/tx_id.rs @@ -66,7 +66,7 @@ impl Hash for TxId { impl PartialEq for TxId { fn eq(&self, other: &Self) -> bool { - self.0.eq(&other.0) + self.0 == other.0 } } diff --git a/base_layer/contacts/proto/message.proto b/base_layer/contacts/proto/message.proto index 6770cb65b7..92c3b7d7fc 100644 --- a/base_layer/contacts/proto/message.proto +++ b/base_layer/contacts/proto/message.proto @@ -18,14 +18,10 @@ enum DirectionEnum { } message MessageMetadata { - MessageTypeEnum 
metadata_type = 1; + bytes key = 1; bytes data = 2; } -enum MessageTypeEnum { - TokenRequest = 0; -} - message Confirmation { bytes message_id = 1; uint64 timestamp = 2; diff --git a/base_layer/contacts/src/chat_client/Cargo.toml b/base_layer/contacts/src/chat_client/Cargo.toml index 2ffdc72137..d9fd8bd99e 100644 --- a/base_layer/contacts/src/chat_client/Cargo.toml +++ b/base_layer/contacts/src/chat_client/Cargo.toml @@ -22,7 +22,7 @@ tari_storage = { path = "../../../../infrastructure/storage" } anyhow = "1.0.41" async-trait = "0.1.52" -config = { version = "0.13.0" } +config = { version = "0.14.0" } diesel = { version = "2.0.3", features = ["sqlite", "r2d2", "serde_json", "chrono", "64-column-tables"] } lmdb-zero = "0.4.4" log = "0.4.17" diff --git a/base_layer/contacts/src/chat_client/src/client.rs b/base_layer/contacts/src/chat_client/src/client.rs index 0e33079075..801f318fc7 100644 --- a/base_layer/contacts/src/chat_client/src/client.rs +++ b/base_layer/contacts/src/chat_client/src/client.rs @@ -34,7 +34,7 @@ use tari_comms::{peer_manager::PeerFeatures, CommsNode, NodeIdentity}; use tari_contacts::contacts_service::{ handle::ContactsServiceHandle, service::ContactOnlineStatus, - types::{Message, MessageBuilder, MessageMetadata, MessageMetadataType}, + types::{Message, MessageBuilder, MessageMetadata}, }; use tari_shutdown::Shutdown; @@ -45,7 +45,7 @@ const LOG_TARGET: &str = "contacts::chat_client"; #[async_trait] pub trait ChatClient { async fn add_contact(&self, address: &TariAddress) -> Result<(), Error>; - fn add_metadata(&self, message: Message, metadata_type: MessageMetadataType, data: String) -> Message; + fn add_metadata(&self, message: Message, metadata_type: String, data: String) -> Message; async fn check_online_status(&self, address: &TariAddress) -> Result; fn create_message(&self, receiver: &TariAddress, message: String) -> Message; async fn get_messages(&self, sender: &TariAddress, limit: u64, page: u64) -> Result, Error>; @@ -198,9 +198,9 @@ 
impl ChatClient for Client { MessageBuilder::new().address(receiver.clone()).message(message).build() } - fn add_metadata(&self, mut message: Message, metadata_type: MessageMetadataType, data: String) -> Message { + fn add_metadata(&self, mut message: Message, key: String, data: String) -> Message { let metadata = MessageMetadata { - metadata_type, + key: key.into_bytes(), data: data.into_bytes(), }; diff --git a/base_layer/contacts/src/contacts_service/types/message.rs b/base_layer/contacts/src/contacts_service/types/message.rs index 9dac4657ad..d259f8da78 100644 --- a/base_layer/contacts/src/contacts_service/types/message.rs +++ b/base_layer/contacts/src/contacts_service/types/message.rs @@ -70,38 +70,17 @@ impl Direction { #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct MessageMetadata { - pub metadata_type: MessageMetadataType, + pub key: Vec, pub data: Vec, } -#[repr(u8)] -#[derive(FromPrimitive, Debug, Copy, Clone, Default, Deserialize, Serialize, PartialEq)] -pub enum MessageMetadataType { - Reply = 0, - #[default] - TokenRequest = 1, -} - -impl MessageMetadataType { - pub fn as_byte(self) -> u8 { - self as u8 - } - - pub fn from_byte(value: u8) -> Option { - FromPrimitive::from_u8(value) - } -} - impl TryFrom for Message { type Error = String; fn try_from(message: proto::Message) -> Result { let mut metadata = vec![]; for m in message.metadata { - match MessageMetadata::try_from(m) { - Ok(md) => metadata.push(md), - Err(e) => return Err(e), - } + metadata.push(m.into()); } Ok(Self { @@ -138,19 +117,11 @@ impl From for OutboundDomainMessage { } } -impl TryFrom for MessageMetadata { - type Error = String; - - fn try_from(md: proto::MessageMetadata) -> Result { - if let Some(md_type) = - MessageMetadataType::from_byte(u8::try_from(md.metadata_type).map_err(|e| e.to_string())?) 
- { - Ok(Self { - data: md.data, - metadata_type: md_type, - }) - } else { - Err("Not a valid metadata type".into()) +impl From for MessageMetadata { + fn from(md: proto::MessageMetadata) -> Self { + Self { + data: md.data, + key: md.key, } } } @@ -159,7 +130,7 @@ impl From for proto::MessageMetadata { fn from(md: MessageMetadata) -> Self { Self { data: md.data, - metadata_type: i32::from(md.metadata_type.as_byte()), + key: md.key, } } } diff --git a/base_layer/contacts/src/contacts_service/types/mod.rs b/base_layer/contacts/src/contacts_service/types/mod.rs index 16fdb38a46..3faa818886 100644 --- a/base_layer/contacts/src/contacts_service/types/mod.rs +++ b/base_layer/contacts/src/contacts_service/types/mod.rs @@ -24,7 +24,7 @@ mod contact; pub use contact::Contact; mod message; -pub use message::{Direction, Message, MessageMetadata, MessageMetadataType}; +pub use message::{Direction, Message, MessageMetadata}; mod message_builder; pub use message_builder::MessageBuilder; diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index 62e1862a1e..40d1f2a57c 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -103,7 +103,7 @@ tari_test_utils = { path = "../../infrastructure/test_utils" } curve25519-dalek = { package = "tari-curve25519-dalek", version = "4.0.3" } # SQLite required for the integration tests libsqlite3-sys = { version = "0.25.1", features = ["bundled"] } -config = { version = "0.13.0" } +config = { version = "0.14.0" } env_logger = "0.7.0" tempfile = "3.1.0" toml = { version = "0.5" } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index c039f120ea..da100c5e16 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -344,6 +344,7 @@ where B: BlockchainBackend + 'static "A peer has requested a block with hash {}", block_hex 
); + #[allow(clippy::blocks_in_conditions)] let maybe_block = match self .blockchain_db .fetch_block_by_hash(hash, true) diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index 308045ad07..796337cffa 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -321,7 +321,8 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { let validator = self.block_validator.clone(); let res = task::spawn_blocking(move || { let txn = db.db_read_access()?; - validator.validate_body(&*txn, &task_block) + let smt = db.smt().clone(); + validator.validate_body(&*txn, &task_block, smt) }) .await?; @@ -367,7 +368,7 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { self.db .write_transaction() .delete_orphan(header_hash) - .insert_tip_block_body(block.clone()) + .insert_tip_block_body(block.clone(), self.db.inner().smt()) .set_best_block( block.height(), header_hash, diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index bdc4cb019c..6e12642639 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -298,11 +298,14 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(sync_peer.clone(), client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); + let cloned_backup_smt = self.db.inner().smt_read_access()?.clone(); match self.synchronize_outputs(sync_peer, client, to_header).await { Ok(_) => Ok(()), Err(err) => { // We need to clean up the outputs let _ = self.clean_up_failed_output_sync(to_header).await; + let mut smt = 
self.db.inner().smt_write_access()?; + *smt = cloned_backup_smt; Err(err) }, } @@ -618,7 +621,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut utxo_counter = 0u64; let mut stxo_counter = 0u64; let timer = Instant::now(); - let mut output_smt = db.fetch_tip_smt().await?; + let mut output_smt = (*db.inner().smt_write_access()?).clone(); let mut last_sync_timer = Instant::now(); let mut avg_latency = RollingAverageTime::new(20); @@ -766,8 +769,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { txn.commit().await?; } } - // This has a very low probability of failure - db.set_tip_smt(output_smt).await?; + let mut writing_lock_output_smt = db.inner().smt_write_access()?; + *writing_lock_output_smt = output_smt; debug!( target: LOG_TARGET, "Finished syncing TXOs: {} unspent and {} spent downloaded in {:.2?}", diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 1c35d079c4..81db956111 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -110,6 +110,7 @@ impl BaseNodeSyncRpcService { #[tari_comms::async_trait] impl BaseNodeSyncService for BaseNodeSyncRpcService { #[instrument(level = "trace", name = "sync_rpc::sync_blocks", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_blocks( &self, request: Request, @@ -273,6 +274,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", name = "sync_rpc::sync_headers", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_headers( &self, request: Request, @@ -373,6 +375,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn get_header_by_height( &self, request: Request, @@ -389,6 +392,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = 
"debug", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn find_chain_split( &self, request: Request, @@ -452,6 +456,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn get_chain_metadata(&self, _: Request<()>) -> Result, RpcStatus> { let chain_metadata = self .db() @@ -462,6 +467,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_kernels( &self, request: Request, @@ -588,6 +594,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ } #[instrument(level = "trace", skip(self), err)] + #[allow(clippy::blocks_in_conditions)] async fn sync_utxos(&self, request: Request) -> Result, RpcStatus> { let req = request.message(); let peer_node_id = request.context().peer_node_id(); diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 219981f5eb..96721141ad 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -19,7 +19,12 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{mem, ops::RangeBounds, sync::Arc, time::Instant}; +use std::{ + mem, + ops::RangeBounds, + sync::{Arc, RwLock}, + time::Instant, +}; use log::*; use primitive_types::U256; @@ -222,10 +227,6 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_tip_header() -> ChainHeader, "fetch_tip_header"); - make_async_fn!(fetch_tip_smt() -> OutputSmt, "fetch_tip_smt"); - - make_async_fn!(set_tip_smt(smt: OutputSmt) -> (), "set_tip_smt"); - make_async_fn!(insert_valid_headers(headers: Vec) -> (), "insert_valid_headers"); //---------------------------------- Block --------------------------------------------// @@ -395,8 +396,8 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn insert_tip_block_body(&mut self, block: Arc) -> &mut Self { - self.transaction.insert_tip_block_body(block); + pub fn insert_tip_block_body(&mut self, block: Arc, smt: Arc>) -> &mut Self { + self.transaction.insert_tip_block_body(block, smt); self } diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 8944761aec..87542c49ec 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -91,7 +91,7 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option<&HashOutput>, ) -> Result, ChainStorageError>; /// Fetch a specific output. 
Returns the output @@ -192,6 +192,6 @@ pub trait BlockchainBackend: Send + Sync { start_height: u64, end_height: u64, ) -> Result, ChainStorageError>; - /// Returns the tip utxo smt - fn fetch_tip_smt(&self) -> Result; + /// Calculates the tip utxo smt + fn calculate_tip_smt(&self) -> Result; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 62bd2aadbb..4b987c1604 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -209,6 +209,7 @@ pub struct BlockchainDatabase { consensus_manager: ConsensusManager, difficulty_calculator: Arc, disable_add_block_flag: Arc, + smt: Arc>, } #[allow(clippy::ptr_arg)] @@ -222,6 +223,7 @@ where B: BlockchainBackend validators: Validators, config: BlockchainDatabaseConfig, difficulty_calculator: DifficultyCalculator, + smt: Arc>, ) -> Result { debug!(target: LOG_TARGET, "BlockchainDatabase config: {:?}", config); let is_empty = db.is_empty()?; @@ -232,6 +234,7 @@ where B: BlockchainBackend consensus_manager, difficulty_calculator: Arc::new(difficulty_calculator), disable_add_block_flag: Arc::new(AtomicBool::new(false)), + smt, }; let genesis_block = Arc::new(blockchain_db.consensus_manager.get_genesis_block()); if is_empty { @@ -241,8 +244,6 @@ where B: BlockchainBackend genesis_block.block().body.to_counts_string() ); let mut txn = DbTransaction::new(); - let smt = OutputSmt::new(); - txn.insert_tip_smt(smt); blockchain_db.write(txn)?; txn = DbTransaction::new(); blockchain_db.insert_block(genesis_block.clone())?; @@ -272,7 +273,9 @@ where B: BlockchainBackend .into(), )); } else { - // block has been added + // lets load the smt into memory + let mut smt = blockchain_db.smt_write_access()?; + *smt = blockchain_db.db_write_access()?.calculate_tip_smt()?; } if config.cleanup_orphans_at_startup { match blockchain_db.cleanup_all_orphans() { @@ -328,6 +331,30 @@ where B: 
BlockchainBackend }) } + pub fn smt_write_access(&self) -> Result, ChainStorageError> { + self.smt.write().map_err(|e| { + error!( + target: LOG_TARGET, + "An attempt to get a write lock on the smt failed. {:?}", e + ); + ChainStorageError::AccessError("write lock on smt".into()) + }) + } + + pub fn smt(&self) -> Arc> { + self.smt.clone() + } + + pub fn smt_read_access(&self) -> Result, ChainStorageError> { + self.smt.read().map_err(|e| { + error!( + target: LOG_TARGET, + "An attempt to get a read lock on the smt failed. {:?}", e + ); + ChainStorageError::AccessError("read lock on smt".into()) + }) + } + #[cfg(test)] pub fn test_db_write_access(&self) -> Result, ChainStorageError> { self.db.write().map_err(|e| { @@ -414,8 +441,7 @@ where B: BlockchainBackend hashes: Vec, ) -> Result>, ChainStorageError> { let db = self.db_read_access()?; - let smt = db.fetch_tip_smt()?; - + let smt = self.smt_read_access()?; let mut result = Vec::with_capacity(hashes.len()); for hash in hashes { let output = db.fetch_output(&hash)?; @@ -476,7 +502,7 @@ where B: BlockchainBackend spend_status_at_header: Option, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block_with_spend_state(&header_hash, spend_status_at_header) + db.fetch_outputs_in_block_with_spend_state(&header_hash, spend_status_at_header.as_ref()) } pub fn fetch_outputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { @@ -673,18 +699,6 @@ where B: BlockchainBackend db.fetch_tip_header() } - pub fn fetch_tip_smt(&self) -> Result { - let db = self.db_read_access()?; - db.fetch_tip_smt() - } - - pub fn set_tip_smt(&self, smt: OutputSmt) -> Result<(), ChainStorageError> { - let mut db = self.db_write_access()?; - let mut txn = DbTransaction::new(); - txn.insert_tip_smt(smt); - db.write(txn) - } - /// Fetches the last header that was added, might be past the tip, as the block body between this last header and /// actual tip might not have been added yet pub fn 
fetch_last_header(&self) -> Result { @@ -873,7 +887,15 @@ where B: BlockchainBackend .ok_or(ChainStorageError::UnexpectedResult("Timestamp overflowed".to_string()))?; } let mut block = Block { header, body }; - let roots = calculate_mmr_roots(&*db, self.rules(), &block)?; + let mut smt = self.smt_write_access()?; + let roots = match calculate_mmr_roots(&*db, self.rules(), &block, &mut smt) { + Ok(v) => v, + Err(e) => { + // some error happend, lets rewind the smt + *smt = db.calculate_tip_smt()?; + return Err(e); + }, + }; block.header.kernel_mr = roots.kernel_mr; block.header.kernel_mmr_size = roots.kernel_mmr_size; block.header.input_mr = roots.input_mr; @@ -892,7 +914,15 @@ where B: BlockchainBackend "calculate_mmr_roots expected a sorted block body, however the block body was not sorted".to_string(), )); }; - let mmr_roots = calculate_mmr_roots(&*db, self.rules(), &block)?; + let mut smt = self.smt_write_access()?; + let mmr_roots = match calculate_mmr_roots(&*db, self.rules(), &block, &mut smt) { + Ok(v) => v, + Err(e) => { + // some error happend, lets reset the smt to its starting state + *smt = db.calculate_tip_smt()?; + return Err(e); + }, + }; Ok((block, mmr_roots)) } @@ -999,6 +1029,7 @@ where B: BlockchainBackend &*self.validators.header, self.consensus_manager.chain_strength_comparer(), candidate_block, + self.smt(), )?; // If blocks were added and the node is in pruned mode, perform pruning @@ -1048,7 +1079,7 @@ where B: BlockchainBackend let mut db = self.db_write_access()?; let mut txn = DbTransaction::new(); - insert_best_block(&mut txn, block, &self.consensus_manager)?; + insert_best_block(&mut txn, block, &self.consensus_manager, self.smt())?; db.write(txn) } @@ -1179,8 +1210,9 @@ where B: BlockchainBackend /// The operation will fail if /// * The block height is in the future pub fn rewind_to_height(&self, height: u64) -> Result>, ChainStorageError> { + let smt = self.smt().clone(); let mut db = self.db_write_access()?; - rewind_to_height(&mut 
*db, height) + rewind_to_height(&mut *db, height, smt) } /// Rewind the blockchain state to the block hash making the block at that hash the new tip. @@ -1191,7 +1223,7 @@ where B: BlockchainBackend /// * The block hash is before the horizon block height determined by the pruning horizon pub fn rewind_to_hash(&self, hash: BlockHash) -> Result>, ChainStorageError> { let mut db = self.db_write_access()?; - rewind_to_hash(&mut *db, hash) + rewind_to_hash(&mut *db, hash, self.smt.clone()) } /// This method will compare all chain tips the node currently knows about. This includes @@ -1207,6 +1239,7 @@ where B: BlockchainBackend &*self.validators.block, self.consensus_manager.chain_strength_comparer(), &self.consensus_manager, + self.smt(), )?; Ok(()) } @@ -1312,6 +1345,8 @@ pub fn calculate_mmr_roots( db: &T, rules: &ConsensusManager, block: &Block, + // we dont want to clone the SMT, so we rather change it and change it back after we are done. + output_smt: &mut OutputSmt, ) -> Result { let header = &block.header; let body = &block.body; @@ -1336,17 +1371,18 @@ pub fn calculate_mmr_roots( })?; let mut kernel_mmr = PrunedKernelMmr::new(kernels); - let mut output_smt = db.fetch_tip_smt()?; let mut input_mmr = PrunedInputMmr::new(PrunedHashSet::default()); for kernel in body.kernels() { kernel_mmr.push(kernel.hash().to_vec())?; } + let mut outputs_to_remove = Vec::new(); for output in body.outputs() { if !output.is_burned() { let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; + outputs_to_remove.push(smt_key.clone()); if let Err(e) = output_smt.insert(smt_key, smt_node) { error!( target: LOG_TARGET, @@ -1358,12 +1394,12 @@ pub fn calculate_mmr_roots( } } + let mut outputs_to_add = Vec::new(); for input in body.inputs() { input_mmr.push(input.canonical_hash().to_vec())?; - let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; match output_smt.delete(&smt_key)? 
{ - DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::Deleted(value_hash) => outputs_to_add.push((smt_key, value_hash)), DeleteResult::KeyNotFound => { error!( target: LOG_TARGET, @@ -1399,6 +1435,37 @@ pub fn calculate_mmr_roots( validator_node_mr, validator_node_size: validator_node_size as u64, }; + // We have made changes to the SMT that we dont want, sp lets rewind the SMT back to tip again as we want to have + // the SMT at tip. + for output in outputs_to_add { + if output_smt.insert(output.0.clone(), output.1).is_err() { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + output.0, + ); + return Err(ChainStorageError::AccessError(format!( + "Could not add output ({}) in SMT", + output.0 + ))); + } + } + for output in outputs_to_remove { + match output_smt.delete(&output)? { + DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::KeyNotFound => { + error!( + target: LOG_TARGET, + "Could not find input({}) in SMT when reseting back to tip", + output, + ); + return Err(ChainStorageError::AccessError(format!( + "Could not find input({}) in SMT when reseting back to tip", + output + ))); + }, + }; + } Ok(mmr_roots) } @@ -1534,6 +1601,7 @@ fn add_block( header_validator: &dyn HeaderChainLinkedValidator, chain_strength_comparer: &dyn ChainStrengthComparer, candidate_block: Arc, + smt: Arc>, ) -> Result { handle_possible_reorg( db, @@ -1543,6 +1611,7 @@ fn add_block( header_validator, chain_strength_comparer, candidate_block, + smt, ) } @@ -1551,6 +1620,7 @@ fn insert_best_block( txn: &mut DbTransaction, block: Arc, consensus: &ConsensusManager, + smt: Arc>, ) -> Result<(), ChainStorageError> { let block_hash = block.accumulated_data().hash; debug!( @@ -1574,7 +1644,7 @@ fn insert_best_block( let accumulated_difficulty = block.accumulated_data().total_accumulated_difficulty; let expected_prev_best_block = block.block().header.prev_hash; txn.insert_chain_header(block.to_chain_header()) - .insert_tip_block_body(block) + 
.insert_tip_block_body(block, smt) .set_best_block( height, block_hash, @@ -1750,6 +1820,7 @@ fn check_for_valid_height(db: &T, height: u64) -> Result<( fn rewind_to_height( db: &mut T, target_height: u64, + smt: Arc>, ) -> Result>, ChainStorageError> { let last_header = db.fetch_last_header()?; @@ -1819,7 +1890,7 @@ fn rewind_to_height( let block = fetch_block(db, last_block_height - h, false)?; let block = Arc::new(block.try_into_chain_block()?); let block_hash = *block.hash(); - txn.delete_tip_block(block_hash); + txn.delete_tip_block(block_hash, smt.clone()); txn.delete_header(last_block_height - h); if !prune_past_horizon && !db.contains(&DbKey::OrphanBlock(*block.hash()))? { // Because we know we will remove blocks we can't recover, this will be a destructive rewind, so we @@ -1876,7 +1947,7 @@ fn rewind_to_height( let header = fetch_header(db, last_block_height - h - steps_back)?; // Although we do not have this full block, this method will remove all remaining data that is linked to // the specific header hash - txn.delete_tip_block(header.hash()); + txn.delete_tip_block(header.hash(), smt.clone()); db.write(txn)?; } } @@ -1887,6 +1958,7 @@ fn rewind_to_height( fn rewind_to_hash( db: &mut T, block_hash: BlockHash, + smt: Arc>, ) -> Result>, ChainStorageError> { let block_hash_hex = block_hash.to_hex(); let target_header = fetch_header_by_block_hash(&*db, block_hash)?.ok_or(ChainStorageError::ValueNotFound { @@ -1894,7 +1966,7 @@ fn rewind_to_hash( field: "block_hash", value: block_hash_hex, })?; - rewind_to_height(db, target_header.height) + rewind_to_height(db, target_header.height, smt) } // Checks whether we should add the block as an orphan. 
If it is the case, the orphan block is added and the chain @@ -1907,13 +1979,21 @@ fn handle_possible_reorg( header_validator: &dyn HeaderChainLinkedValidator, chain_strength_comparer: &dyn ChainStrengthComparer, candidate_block: Arc, + smt: Arc>, ) -> Result { let timer = Instant::now(); let height = candidate_block.header.height; let hash = candidate_block.header.hash(); insert_orphan_and_find_new_tips(db, candidate_block, header_validator, consensus_manager)?; let after_orphans = timer.elapsed(); - let res = swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer, consensus_manager); + let res = swap_to_highest_pow_chain( + db, + config, + block_validator, + chain_strength_comparer, + consensus_manager, + smt, + ); trace!( target: LOG_TARGET, "[handle_possible_reorg] block #{}, insert_orphans in {:.2?}, swap_to_highest in {:.2?} '{}'", @@ -1933,8 +2013,9 @@ fn reorganize_chain( fork_hash: HashOutput, new_chain_from_fork: &VecDeque>, consensus: &ConsensusManager, + smt: Arc>, ) -> Result>, ChainStorageError> { - let removed_blocks = rewind_to_hash(backend, fork_hash)?; + let removed_blocks = rewind_to_hash(backend, fork_hash, smt.clone())?; debug!( target: LOG_TARGET, "Validate and add {} chain block(s) from block {}. 
Rewound blocks: [{}]", @@ -1951,7 +2032,7 @@ fn reorganize_chain( let block_hash = *block.hash(); txn.delete_orphan(block_hash); let chain_metadata = backend.fetch_chain_metadata()?; - if let Err(e) = block_validator.validate_body_with_metadata(backend, block, &chain_metadata) { + if let Err(e) = block_validator.validate_body_with_metadata(backend, block, &chain_metadata, smt.clone()) { warn!( target: LOG_TARGET, "Orphan block {} ({}) failed validation during chain reorg: {:?}", @@ -1970,11 +2051,21 @@ fn reorganize_chain( backend.write(txn)?; info!(target: LOG_TARGET, "Restoring previous chain after failed reorg."); - restore_reorged_chain(backend, fork_hash, removed_blocks, consensus)?; + restore_reorged_chain(backend, fork_hash, removed_blocks, consensus, smt.clone())?; return Err(e.into()); } - insert_best_block(&mut txn, block.clone(), consensus)?; + if let Err(e) = insert_best_block(&mut txn, block.clone(), consensus, smt.clone()) { + let mut write_smt = smt.write().map_err(|e| { + error!( + target: LOG_TARGET, + "reorganize_chain could not get a write lock on the smt. {:?}", e + ); + ChainStorageError::AccessError("write lock on smt".into()) + })?; + *write_smt = backend.calculate_tip_smt()?; + return Err(e); + } // Failed to store the block - this should typically never happen unless there is a bug in the validator // (e.g. does not catch a double spend). In any case, we still need to restore the chain to a // good state before returning. @@ -1984,7 +2075,7 @@ fn reorganize_chain( "Failed to commit reorg chain: {:?}. 
Restoring last chain.", e ); - restore_reorged_chain(backend, fork_hash, removed_blocks, consensus)?; + restore_reorged_chain(backend, fork_hash, removed_blocks, consensus, smt)?; return Err(e); } } @@ -1998,12 +2089,13 @@ fn swap_to_highest_pow_chain( block_validator: &dyn CandidateBlockValidator, chain_strength_comparer: &dyn ChainStrengthComparer, consensus: &ConsensusManager, + smt: Arc>, ) -> Result { let metadata = db.fetch_chain_metadata()?; // lets clear out all remaining headers that dont have a matching block // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current // height it will only trim the extra headers with no blocks - rewind_to_height(db, metadata.best_block_height())?; + rewind_to_height(db, metadata.best_block_height(), smt.clone())?; let strongest_orphan_tips = db.fetch_strongest_orphan_chain_tips()?; if strongest_orphan_tips.is_empty() { // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets @@ -2054,7 +2146,7 @@ fn swap_to_highest_pow_chain( .prev_hash; let num_added_blocks = reorg_chain.len(); - let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain, consensus)?; + let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain, consensus, smt)?; let num_removed_blocks = removed_blocks.len(); // reorg is required when any blocks are removed or more than one are added @@ -2107,8 +2199,9 @@ fn restore_reorged_chain( to_hash: HashOutput, previous_chain: Vec>, consensus: &ConsensusManager, + smt: Arc>, ) -> Result<(), ChainStorageError> { - let invalid_chain = rewind_to_hash(db, to_hash)?; + let invalid_chain = rewind_to_hash(db, to_hash, smt.clone())?; debug!( target: LOG_TARGET, "Removed {} blocks during chain restore: {:?}.", @@ -2122,7 +2215,7 @@ fn restore_reorged_chain( for block in previous_chain.into_iter().rev() { txn.delete_orphan(block.accumulated_data().hash); - 
insert_best_block(&mut txn, block, consensus)?; + insert_best_block(&mut txn, block, consensus, smt.clone())?; } db.write(txn)?; Ok(()) @@ -2519,6 +2612,7 @@ impl Clone for BlockchainDatabase { consensus_manager: self.consensus_manager.clone(), difficulty_calculator: self.difficulty_calculator.clone(), disable_add_block_flag: self.disable_add_block_flag.clone(), + smt: self.smt.clone(), } } } @@ -2618,7 +2712,7 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, chain) = create_orphan_chain( &db, &[("A->GB", 1, 120), ("B->A", 1, 120), ("C->B", 1, 120)], @@ -2648,7 +2742,7 @@ mod test { // Create reorg chain // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), // they will pass all mr tests - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let fork_root = mainchain.get("B").unwrap().clone(); let (_, reorg_chain) = create_orphan_chain( &db, @@ -2694,7 +2788,7 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block, &mut smt).await; let block = chain.get("A").unwrap().clone(); let mut access = db.db_write_access().unwrap(); @@ -2712,7 +2806,7 @@ mod test { let (_, main_chain) = create_main_chain(&db, &[("A->GB", 1, 120), ("B->A", 1, 120)]).await; let block_b = main_chain.get("B").unwrap().clone(); - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, orphan_chain) = create_chained_blocks( &[("C2->GB", 1, 120), ("D2->C2", 1, 120), ("E2->D2", 1, 120)], block_b, @@ -2740,7 +2834,7 @@ mod test { let (_, main_chain) = create_main_chain(&db, &[("A->GB", 1, 120)]).await; let fork_root = 
main_chain.get("A").unwrap().clone(); - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, orphan_chain) = create_chained_blocks(&[("B2->GB", 1, 120)], fork_root, &mut smt).await; let mut access = db.db_write_access().unwrap(); @@ -2780,7 +2874,7 @@ mod test { let fork_root_1 = main_chain.get("A").unwrap().clone(); // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), // they will pass all mr tests - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, orphan_chain_1) = create_chained_blocks( &[("B2->GB", 1, 120), ("C2->B2", 1, 120), ("D2->C2", 1, 120)], @@ -2867,7 +2961,7 @@ mod test { #[tokio::test] async fn it_links_many_orphan_branches_to_main_chain() { let test = TestHarness::setup(); - let mut smt = test.db.fetch_tip_smt().unwrap(); + let mut smt = test.db.smt_read_access().unwrap().clone(); let (_, main_chain) = create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])).await; let genesis = main_chain.get("GB").unwrap().clone(); @@ -2963,7 +3057,7 @@ mod test { let test = TestHarness::setup(); // This test assumes a MTC of 11 assert_eq!(test.consensus.consensus_constants(0).median_timestamp_count(), 11); - let mut smt = test.db.fetch_tip_smt().unwrap(); + let mut smt = test.db.smt_read_access().unwrap().clone(); let (_, main_chain) = create_main_chain( &test.db, block_specs!( @@ -3055,7 +3149,7 @@ mod test { #[tokio::test] async fn it_errors_if_reorging_to_an_invalid_height() { let test = TestHarness::setup(); - let mut smt = test.db.fetch_tip_smt().unwrap(); + let mut smt = test.db.smt_read_access().unwrap().clone(); let (_, main_chain) = create_main_chain(&test.db, block_specs!(["1a->GB"], ["2a->1a"], ["3a->2a"], ["4a->3a"])).await; @@ -3077,7 +3171,7 @@ mod test { #[tokio::test] async fn it_allows_orphan_blocks_with_any_height() { let test = TestHarness::setup(); - 
let mut smt = test.db.fetch_tip_smt().unwrap(); + let mut smt = test.db.smt_read_access().unwrap().clone(); let (_, main_chain) = create_main_chain( &test.db, block_specs!(["1a->GB", difficulty: Difficulty::from_u64(2).unwrap()]), @@ -3195,7 +3289,7 @@ mod test { #[tokio::test] async fn test_handle_possible_reorg_case6_orphan_chain_link() { let db = create_new_blockchain(); - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let (_, mainchain) = create_main_chain(&db, &[ ("A->GB", 1, 120), ("B->A", 1, 120), @@ -3221,6 +3315,7 @@ mod test { // Add true orphans let mut access = db.db_write_access().unwrap(); + let smt = db.smt().clone(); let result = handle_possible_reorg( &mut *access, &Default::default(), @@ -3229,11 +3324,13 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), + smt, ) .unwrap(); result.assert_orphaned(); // Test adding a duplicate orphan + let smt = db.smt().clone(); let result = handle_possible_reorg( &mut *access, &Default::default(), @@ -3242,10 +3339,12 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), + smt, ) .unwrap(); result.assert_orphaned(); + let smt = db.smt().clone(); let result = handle_possible_reorg( &mut *access, &Default::default(), @@ -3254,6 +3353,7 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("D2").unwrap().to_arc_block(), + smt, ) .unwrap(); result.assert_orphaned(); @@ -3261,6 +3361,7 @@ mod test { let tip = access.fetch_last_header().unwrap(); assert_eq!(&tip, mainchain.get("D").unwrap().header()); + let smt = db.smt().clone(); let result = handle_possible_reorg( &mut *access, &Default::default(), @@ -3269,6 +3370,7 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("C2").unwrap().to_arc_block(), + smt, ) .unwrap(); result.assert_reorg(3, 2); @@ -3293,13 +3395,14 @@ mod test { let chain_strength_comparer = 
strongest_chain().by_sha3x_difficulty().build(); // we only need a smt, this one will not be technically correct, but due to the use of mockvalidators(true), // they will pass all mr tests - let mut smt = db.fetch_tip_smt().unwrap(); + let mut smt = db.smt_read_access().unwrap().clone(); let fork_block = mainchain.get("C").unwrap().clone(); let (_, reorg_chain) = create_chained_blocks(&[("D2->GB", 1, 120), ("E2->D2", 2, 120)], fork_block, &mut smt).await; // Add true orphans let mut access = db.db_write_access().unwrap(); + let smt = db.smt().clone(); let result = handle_possible_reorg( &mut *access, &Default::default(), @@ -3308,10 +3411,12 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), + smt, ) .unwrap(); result.assert_orphaned(); + let smt = db.smt().clone(); let _error = handle_possible_reorg( &mut *access, &Default::default(), @@ -3320,6 +3425,7 @@ mod test { &mock_validator, &*chain_strength_comparer, reorg_chain.get("D2").unwrap().to_arc_block(), + smt, ) .unwrap_err(); @@ -3553,6 +3659,7 @@ mod test { pub fn handle_possible_reorg(&self, block: Arc) -> Result { let mut access = self.db_write_access(); + let smt = self.db.smt().clone(); handle_possible_reorg( &mut *access, &self.config, @@ -3561,6 +3668,7 @@ mod test { &*self.header_validator, &*self.chain_strength_comparer, block, + smt, ) } } @@ -3577,8 +3685,10 @@ mod test { .try_into_chain_block() .map(Arc::new) .unwrap(); - let mut smt = test.db.fetch_tip_smt().unwrap(); - let (block_names, chain) = create_chained_blocks(blocks, genesis_block, &mut smt).await; + let (block_names, chain) = { + let mut smt = test.db.smt_read_access().unwrap().clone(); + create_chained_blocks(blocks, genesis_block, &mut smt).await + }; let mut results = vec![]; for name in block_names { diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index bb619f1ca5..2650bba36a 100644 --- 
a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -23,7 +23,7 @@ use std::{ fmt, fmt::{Display, Error, Formatter}, - sync::Arc, + sync::{Arc, RwLock}, }; use primitive_types::U256; @@ -81,8 +81,8 @@ impl DbTransaction { } /// Delete a block - pub fn delete_tip_block(&mut self, block_hash: HashOutput) -> &mut Self { - self.operations.push(WriteOperation::DeleteTipBlock(block_hash)); + pub fn delete_tip_block(&mut self, block_hash: HashOutput, smt: Arc>) -> &mut Self { + self.operations.push(WriteOperation::DeleteTipBlock(block_hash, smt)); self } @@ -171,8 +171,8 @@ impl DbTransaction { /// Add the BlockHeader and contents of a `Block` (i.e. inputs, outputs and kernels) to the database. /// If the `BlockHeader` already exists, then just the contents are updated along with the relevant accumulated /// data. - pub fn insert_tip_block_body(&mut self, block: Arc) -> &mut Self { - self.operations.push(WriteOperation::InsertTipBlockBody { block }); + pub fn insert_tip_block_body(&mut self, block: Arc, smt: Arc>) -> &mut Self { + self.operations.push(WriteOperation::InsertTipBlockBody { block, smt }); self } @@ -279,11 +279,6 @@ impl DbTransaction { self.operations.push(WriteOperation::ClearAllReorgs); self } - - pub fn insert_tip_smt(&mut self, smt: OutputSmt) -> &mut Self { - self.operations.push(WriteOperation::InsertTipSmt { smt }); - self - } } #[derive(Debug)] @@ -296,6 +291,7 @@ pub enum WriteOperation { }, InsertTipBlockBody { block: Arc, + smt: Arc>, }, InsertKernel { header_hash: HashOutput, @@ -315,7 +311,7 @@ pub enum WriteOperation { }, DeleteHeader(u64), DeleteOrphan(HashOutput), - DeleteTipBlock(HashOutput), + DeleteTipBlock(HashOutput, Arc>), DeleteOrphanChainTip(HashOutput), InsertOrphanChainTip(HashOutput, U256), InsertMoneroSeedHeight(Vec, u64), @@ -356,9 +352,6 @@ pub enum WriteOperation { reorg: Reorg, }, ClearAllReorgs, - InsertTipSmt { - smt: OutputSmt, - }, } impl fmt::Display for 
WriteOperation { @@ -375,7 +368,7 @@ impl fmt::Display for WriteOperation { InsertChainHeader { header } => { write!(f, "InsertChainHeader(#{} {})", header.height(), header.hash()) }, - InsertTipBlockBody { block } => write!( + InsertTipBlockBody { block, smt: _ } => write!( f, "InsertTipBlockBody({}, {})", block.accumulated_data().hash, @@ -408,7 +401,7 @@ impl fmt::Display for WriteOperation { InsertOrphanChainTip(hash, total_accumulated_difficulty) => { write!(f, "InsertOrphanChainTip({}, {})", hash, total_accumulated_difficulty) }, - DeleteTipBlock(hash) => write!(f, "DeleteTipBlock({})", hash), + DeleteTipBlock(hash, _) => write!(f, "DeleteTipBlock({})", hash), InsertMoneroSeedHeight(data, height) => { write!(f, "Insert Monero seed string {} for height: {}", data.to_hex(), height) }, @@ -454,13 +447,6 @@ impl fmt::Display for WriteOperation { SetHorizonData { .. } => write!(f, "Set horizon data"), InsertReorg { .. } => write!(f, "Insert reorg"), ClearAllReorgs => write!(f, "Clear all reorgs"), - InsertTipSmt { smt: output_smt } => { - write!( - f, - "Inserting sparse merkle tree with root: {}", - output_smt.unsafe_hash() - ) - }, } } } diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index f9551e336c..7261030149 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -118,7 +118,7 @@ pub enum ChainStorageError { #[error("Key {key} in {table_name} already exists")] KeyExists { table_name: &'static str, key: String }, #[error("Database resize required")] - DbResizeRequired, + DbResizeRequired(Option), #[error("DB transaction was too large ({0} operations)")] DbTransactionTooLarge(usize), #[error("DB needs to be resynced: {0}")] @@ -183,7 +183,7 @@ impl ChainStorageError { _err @ ChainStorageError::IoError(_) | _err @ ChainStorageError::CannotCalculateNonTipMmr(_) | _err @ ChainStorageError::KeyExists { .. 
} | - _err @ ChainStorageError::DbResizeRequired | + _err @ ChainStorageError::DbResizeRequired(_) | _err @ ChainStorageError::DbTransactionTooLarge(_) | _err @ ChainStorageError::DatabaseResyncRequired(_) | _err @ ChainStorageError::BlockError(_) | @@ -213,7 +213,7 @@ impl From for ChainStorageError { field: "", value: "".to_string(), }, - Code(error::MAP_FULL) => ChainStorageError::DbResizeRequired, + Code(error::MAP_FULL) => ChainStorageError::DbResizeRequired(None), _ => ChainStorageError::AccessError(err.to_string()), } } diff --git a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs index 1bf6b2e60d..064b7be75b 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs @@ -20,23 +20,53 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+use std::time::Instant; + use lmdb_zero::error; use log::*; use serde::{de::DeserializeOwned, Serialize}; +use tari_storage::lmdb_store::BYTES_PER_MB; use crate::chain_storage::ChainStorageError; pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb"; -pub fn serialize(data: &T) -> Result, ChainStorageError> +/// Serialize the given data into a byte vector +/// Note: +/// `size_hint` is given as an option as checking what the serialized would be is expensive +/// for large data structures at ~30% overhead +pub fn serialize(data: &T, size_hint: Option) -> Result, ChainStorageError> where T: Serialize { - let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; - #[allow(clippy::cast_possible_truncation)] - let mut buf = Vec::with_capacity(size as usize); + let start = Instant::now(); + let mut buf = if let Some(size) = size_hint { + Vec::with_capacity(size) + } else { + let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; + #[allow(clippy::cast_possible_truncation)] + Vec::with_capacity(size as usize) + }; + let check_time = start.elapsed(); bincode::serialize_into(&mut buf, data).map_err(|e| { error!(target: LOG_TARGET, "Could not serialize lmdb: {:?}", e); ChainStorageError::AccessError(e.to_string()) })?; + if buf.len() >= BYTES_PER_MB { + let serialize_time = start.elapsed() - check_time; + trace!( + "lmdb_replace - {} MB, serialize check in {:.2?}, serialize in {:.2?}", + buf.len() / BYTES_PER_MB, + check_time, + serialize_time + ); + } + if let Some(size) = size_hint { + if buf.len() > size { + warn!( + target: LOG_TARGET, + "lmdb_replace - Serialized size hint was too small. 
Expected {}, got {}", size, buf.len() + ); + } + } Ok(buf) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index ce660df04a..2b104b7289 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::fmt::Debug; +use std::{fmt::Debug, time::Instant}; use lmdb_zero::{ del, @@ -37,6 +37,7 @@ use lmdb_zero::{ }; use log::*; use serde::{de::DeserializeOwned, Serialize}; +use tari_storage::lmdb_store::BYTES_PER_MB; use tari_utilities::hex::to_hex; use crate::chain_storage::{ @@ -62,7 +63,7 @@ where K: AsLmdbBytes + ?Sized + Debug, V: Serialize + Debug, { - let val_buf = serialize(val)?; + let val_buf = serialize(val, None)?; match txn.access().put(db, key, &val_buf, put::NOOVERWRITE) { Ok(_) => { trace!( @@ -86,7 +87,7 @@ where target: LOG_TARGET, "Could not insert {} bytes with key '{}' into '{}' ({:?})", val_buf.len(), to_hex(key.as_lmdb_bytes()), table_name, err ); - Err(ChainStorageError::DbResizeRequired) + Err(ChainStorageError::DbResizeRequired(Some(val_buf.len()))) }, Err(e) => { error!( @@ -112,11 +113,11 @@ where K: AsLmdbBytes + ?Sized, V: Serialize, { - let val_buf = serialize(val)?; + let val_buf = serialize(val, None)?; txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { if let lmdb_zero::Error::Code(code) = &e { if *code == lmdb_zero::error::MAP_FULL { - return ChainStorageError::DbResizeRequired; + return ChainStorageError::DbResizeRequired(Some(val_buf.len())); } } error!( @@ -128,16 +129,23 @@ where } /// Inserts or replaces the item at the given key. 
If the key does not exist, a new entry is created -pub fn lmdb_replace(txn: &WriteTransaction<'_>, db: &Database, key: &K, val: &V) -> Result<(), ChainStorageError> +pub fn lmdb_replace( + txn: &WriteTransaction<'_>, + db: &Database, + key: &K, + val: &V, + size_hint: Option, +) -> Result<(), ChainStorageError> where K: AsLmdbBytes + ?Sized, V: Serialize, { - let val_buf = serialize(val)?; - txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { + let val_buf = serialize(val, size_hint)?; + let start = Instant::now(); + let res = txn.access().put(db, key, &val_buf, put::Flags::empty()).map_err(|e| { if let lmdb_zero::Error::Code(code) = &e { if *code == lmdb_zero::error::MAP_FULL { - return ChainStorageError::DbResizeRequired; + return ChainStorageError::DbResizeRequired(Some(val_buf.len())); } } error!( @@ -145,7 +153,16 @@ where "Could not replace value in lmdb transaction: {:?}", e ); ChainStorageError::AccessError(e.to_string()) - }) + }); + if val_buf.len() >= BYTES_PER_MB { + let write_time = start.elapsed(); + trace!( + "lmdb_replace - {} MB, lmdb write in {:.2?}", + val_buf.len() / BYTES_PER_MB, + write_time + ); + } + res } /// Deletes the given key. An error is returned if the key does not exist @@ -175,7 +192,7 @@ where K: AsLmdbBytes + ?Sized, V: Serialize, { - txn.access().del_item(db, key, &serialize(value)?)?; + txn.access().del_item(db, key, &serialize(value, None)?)?; Ok(()) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 83b09922e1..3e9d2a893b 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -20,7 +20,17 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{convert::TryFrom, fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; +use std::{ + cmp::max, + convert::TryFrom, + fmt, + fs, + fs::File, + ops::Deref, + path::Path, + sync::{Arc, RwLock}, + time::Instant, +}; use fs2::FileExt; use lmdb_zero::{open, ConstTransaction, Database, Environment, ReadTransaction, WriteTransaction}; @@ -33,7 +43,7 @@ use tari_common_types::{ types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}; -use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; +use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore, BYTES_PER_MB}; use tari_utilities::{ hex::{to_hex, Hex}, ByteArray, @@ -140,7 +150,6 @@ const LMDB_DB_REORGS: &str = "reorgs"; const LMDB_DB_VALIDATOR_NODES: &str = "validator_nodes"; const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; const LMDB_DB_TEMPLATE_REGISTRATIONS: &str = "template_registrations"; -const LMDB_DB_TIP_UTXO_SMT: &str = "tip_utxo_smt"; /// HeaderHash(32), mmr_pos(8), hash(32) type KernelKey = CompositeKey<72>; @@ -190,7 +199,6 @@ pub fn create_lmdb_database>( .add_database(LMDB_DB_VALIDATOR_NODES, flags) .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags) .add_database(LMDB_DB_TEMPLATE_REGISTRATIONS, flags | db::DUPSORT) - .add_database(LMDB_DB_TIP_UTXO_SMT, flags) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); @@ -249,8 +257,6 @@ pub struct LMDBDatabase { reorgs: DatabaseRef, /// Maps -> ActiveValidatorNode validator_nodes: DatabaseRef, - /// Stores the sparse merkle tree of the utxo set on tip - tip_utxo_smt: DatabaseRef, /// Maps -> VN Shard Key validator_nodes_mapping: DatabaseRef, /// Maps CodeTemplateRegistration -> TemplateRegistration @@ -293,7 +299,6 @@ impl LMDBDatabase { reorgs: get_database(store, 
LMDB_DB_REORGS)?, validator_nodes: get_database(store, LMDB_DB_VALIDATOR_NODES)?, validator_nodes_mapping: get_database(store, LMDB_DB_VALIDATOR_NODES_MAPPING)?, - tip_utxo_smt: get_database(store, LMDB_DB_TIP_UTXO_SMT)?, template_registrations: get_database(store, LMDB_DB_TEMPLATE_REGISTRATIONS)?, env, env_config: store.env_config(), @@ -322,16 +327,18 @@ impl LMDBDatabase { fn apply_db_transaction(&mut self, txn: &DbTransaction) -> Result<(), ChainStorageError> { #[allow(clippy::enum_glob_use)] use WriteOperation::*; + + let number_of_operations = txn.operations().len(); let write_txn = self.write_transaction()?; - for op in txn.operations() { - trace!(target: LOG_TARGET, "[apply_db_transaction] WriteOperation: {}", op); + for (i, op) in txn.operations().iter().enumerate() { + trace!(target: LOG_TARGET, "[apply_db_transaction] WriteOperation: {} ({} of {})", op, i + 1, number_of_operations); match op { InsertOrphanBlock(block) => self.insert_orphan_block(&write_txn, block)?, InsertChainHeader { header } => { self.insert_header(&write_txn, header.header(), header.accumulated_data())?; }, - InsertTipBlockBody { block } => { - self.insert_tip_block_body(&write_txn, block.header(), block.block().body.clone())?; + InsertTipBlockBody { block, smt } => { + self.insert_tip_block_body(&write_txn, block.header(), block.block().body.clone(), smt.clone())?; }, InsertKernel { header_hash, @@ -374,8 +381,8 @@ impl LMDBDatabase { "orphan_chain_tips_db", )?; }, - DeleteTipBlock(hash) => { - self.delete_tip_block_body(&write_txn, hash)?; + DeleteTipBlock(hash, smt) => { + self.delete_tip_block_body(&write_txn, hash, smt.clone())?; }, InsertMoneroSeedHeight(data, height) => { self.insert_monero_seed_height(&write_txn, data, *height)?; @@ -476,14 +483,11 @@ impl LMDBDatabase { self.insert_bad_block_and_cleanup(&write_txn, hash, *height, reason.to_string())?; }, InsertReorg { reorg } => { - lmdb_replace(&write_txn, &self.reorgs, &reorg.local_time.timestamp(), &reorg)?; + 
lmdb_replace(&write_txn, &self.reorgs, &reorg.local_time.timestamp(), &reorg, None)?; }, ClearAllReorgs => { lmdb_clear(&write_txn, &self.reorgs)?; }, - InsertTipSmt { smt } => { - self.insert_tip_smt(&write_txn, smt)?; - }, } } write_txn.commit()?; @@ -493,38 +497,38 @@ impl LMDBDatabase { fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 26] { [ - ("metadata_db", &self.metadata_db), - ("headers_db", &self.headers_db), - ("header_accumulated_data_db", &self.header_accumulated_data_db), - ("block_accumulated_data_db", &self.block_accumulated_data_db), - ("block_hashes_db", &self.block_hashes_db), - ("utxos_db", &self.utxos_db), - ("inputs_db", &self.inputs_db), - ("txos_hash_to_index_db", &self.txos_hash_to_index_db), - ("kernels_db", &self.kernels_db), - ("kernel_excess_index", &self.kernel_excess_index), - ("kernel_excess_sig_index", &self.kernel_excess_sig_index), - ("kernel_mmr_size_index", &self.kernel_mmr_size_index), - ("utxo_commitment_index", &self.utxo_commitment_index), - ("contract_index", &self.contract_index), - ("unique_id_index", &self.unique_id_index), + (LMDB_DB_METADATA, &self.metadata_db), + (LMDB_DB_HEADERS, &self.headers_db), + (LMDB_DB_HEADER_ACCUMULATED_DATA, &self.header_accumulated_data_db), + (LMDB_DB_BLOCK_ACCUMULATED_DATA, &self.block_accumulated_data_db), + (LMDB_DB_BLOCK_HASHES, &self.block_hashes_db), + (LMDB_DB_UTXOS, &self.utxos_db), + (LMDB_DB_INPUTS, &self.inputs_db), + (LMDB_DB_TXOS_HASH_TO_INDEX, &self.txos_hash_to_index_db), + (LMDB_DB_KERNELS, &self.kernels_db), + (LMDB_DB_KERNEL_EXCESS_INDEX, &self.kernel_excess_index), + (LMDB_DB_KERNEL_EXCESS_SIG_INDEX, &self.kernel_excess_sig_index), + (LMDB_DB_KERNEL_MMR_SIZE_INDEX, &self.kernel_mmr_size_index), + (LMDB_DB_UTXO_COMMITMENT_INDEX, &self.utxo_commitment_index), + (LMDB_DB_CONTRACT_ID_INDEX, &self.contract_index), + (LMDB_DB_UNIQUE_ID_INDEX, &self.unique_id_index), ( - "deleted_txo_hash_to_header_index", + LMDB_DB_DELETED_TXO_HASH_TO_HEADER_INDEX, 
&self.deleted_txo_hash_to_header_index, ), - ("orphans_db", &self.orphans_db), + (LMDB_DB_ORPHANS, &self.orphans_db), ( - "orphan_header_accumulated_data_db", + LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, &self.orphan_header_accumulated_data_db, ), - ("monero_seed_height_db", &self.monero_seed_height_db), - ("orphan_chain_tips_db", &self.orphan_chain_tips_db), - ("orphan_parent_map_index", &self.orphan_parent_map_index), - ("bad_blocks", &self.bad_blocks), - ("reorgs", &self.reorgs), - ("validator_nodes", &self.validator_nodes), - ("validator_nodes_mapping", &self.validator_nodes_mapping), - ("template_registrations", &self.template_registrations), + (LMDB_DB_MONERO_SEED_HEIGHT, &self.monero_seed_height_db), + (LMDB_DB_ORPHAN_CHAIN_TIPS, &self.orphan_chain_tips_db), + (LMDB_DB_ORPHAN_PARENT_MAP_INDEX, &self.orphan_parent_map_index), + (LMDB_DB_BAD_BLOCK_LIST, &self.bad_blocks), + (LMDB_DB_REORGS, &self.reorgs), + (LMDB_DB_VALIDATOR_NODES, &self.validator_nodes), + (LMDB_DB_VALIDATOR_NODES_MAPPING, &self.validator_nodes_mapping), + (LMDB_DB_TEMPLATE_REGISTRATIONS, &self.template_registrations), ] } @@ -568,7 +572,7 @@ impl LMDBDatabase { mined_height: header_height, mined_timestamp: header_timestamp, }, - "utxos_db", + LMDB_DB_UTXOS, )?; Ok(()) @@ -708,7 +712,7 @@ impl LMDBDatabase { k: MetadataKey, v: &MetadataValue, ) -> Result<(), ChainStorageError> { - lmdb_replace(txn, &self.metadata_db, &k.as_u32(), v)?; + lmdb_replace(txn, &self.metadata_db, &k.as_u32(), v, None)?; Ok(()) } @@ -881,6 +885,7 @@ impl LMDBDatabase { &self, write_txn: &WriteTransaction<'_>, block_hash: &HashOutput, + smt: Arc>, ) -> Result<(), ChainStorageError> { let hash_hex = block_hash.to_hex(); debug!(target: LOG_TARGET, "Deleting block `{}`", hash_hex); @@ -903,12 +908,19 @@ impl LMDBDatabase { &height, "block_accumulated_data_db", )?; - let mut smt = self.fetch_tip_smt()?; - self.delete_block_inputs_outputs(write_txn, block_hash.as_slice(), height, &mut smt)?; + let mut output_smt = 
smt.write().map_err(|e| { + error!( + target: LOG_TARGET, + "delete_tip_block_body could not get a write lock on the smt. {:?}", e + ); + ChainStorageError::AccessError("write lock on smt".into()) + })?; + + self.delete_block_inputs_outputs(write_txn, block_hash.as_slice(), height, &mut output_smt)?; let new_tip_header = self.fetch_chain_header_by_height(prev_height)?; - let root = FixedHash::try_from(smt.hash().as_slice())?; + let root = FixedHash::try_from(output_smt.hash().as_slice())?; if root != new_tip_header.header().output_mr { error!( target: LOG_TARGET, @@ -920,7 +932,7 @@ impl LMDBDatabase { "Deleting block, new smt root did not match expected smt root".to_string(), )); } - self.insert_tip_smt(write_txn, &smt)?; + self.delete_block_kernels(write_txn, block_hash.as_slice())?; Ok(()) @@ -1174,7 +1186,15 @@ impl LMDBDatabase { txn: &WriteTransaction<'_>, header: &BlockHeader, body: AggregateBody, + smt: Arc>, ) -> Result<(), ChainStorageError> { + let mut output_smt = smt.write().map_err(|e| { + error!( + target: LOG_TARGET, + "insert_tip_block_body could not get a write lock on the smt. 
{:?}", e + ); + ChainStorageError::AccessError("write lock on smt".into()) + })?; if self.fetch_block_accumulated_data(txn, header.height + 1)?.is_some() { return Err(ChainStorageError::InvalidOperation(format!( "Attempted to insert block at height {} while next block already exists", @@ -1239,13 +1259,6 @@ impl LMDBDatabase { ); self.insert_kernel(txn, &block_hash, &kernel, pos)?; } - let k = MetadataKey::TipSmt; - let mut output_smt: OutputSmt = - lmdb_get(txn, &self.tip_utxo_smt, &k.as_u32())?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "Output_smt", - field: "tip", - value: "".to_string(), - })?; for output in outputs { trace!( @@ -1342,7 +1355,6 @@ impl LMDBDatabase { header.height, &BlockAccumulatedData::new(kernel_mmr.get_pruned_hash_set()?, total_kernel_sum), )?; - self.insert_tip_smt(txn, &output_smt)?; Ok(()) } @@ -1410,11 +1422,6 @@ impl LMDBDatabase { ) } - fn insert_tip_smt(&self, txn: &WriteTransaction<'_>, smt: &OutputSmt) -> Result<(), ChainStorageError> { - let k = MetadataKey::TipSmt; - lmdb_replace(txn, &self.tip_utxo_smt, &k.as_u32(), smt) - } - fn update_block_accumulated_data( &self, write_txn: &WriteTransaction<'_>, @@ -1438,7 +1445,13 @@ impl LMDBDatabase { block_accum_data.kernels = kernel_hash_set; } - lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; + lmdb_replace( + write_txn, + &self.block_accumulated_data_db, + &height, + &block_accum_data, + None, + )?; Ok(()) } @@ -1450,7 +1463,7 @@ impl LMDBDatabase { ) -> Result<(), ChainStorageError> { let current_height = lmdb_get(write_txn, &self.monero_seed_height_db, seed)?.unwrap_or(std::u64::MAX); if height < current_height { - lmdb_replace(write_txn, &self.monero_seed_height_db, seed, &height)?; + lmdb_replace(write_txn, &self.monero_seed_height_db, seed, &height, None)?; }; Ok(()) } @@ -1493,7 +1506,7 @@ impl LMDBDatabase { buffer.copy_from_slice(&key_bytes[0..32]); let key = OutputKey::new(&FixedHash::from(buffer), 
&input.output_hash())?; debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); - lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), LMDB_DB_UTXOS)?; }; // From 'txos_hash_to_index_db::utxos_db' debug!( @@ -1505,7 +1518,7 @@ impl LMDBDatabase { write_txn, &self.txos_hash_to_index_db, input.output_hash().as_slice(), - "utxos_db", + LMDB_DB_UTXOS, )?; } @@ -1535,14 +1548,14 @@ impl LMDBDatabase { write_txn, &self.txos_hash_to_index_db, output_hash.as_slice(), - "utxos_db", + LMDB_DB_UTXOS, )?; let mut buffer = [0u8; 32]; buffer.copy_from_slice(&key_bytes[0..32]); let key = OutputKey::new(&FixedHash::from(buffer), output_hash)?; debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); - lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), LMDB_DB_UTXOS)?; }, None => return Err(ChainStorageError::InvalidOperation("Output key not found".to_string())), } @@ -1608,7 +1621,7 @@ impl LMDBDatabase { #[cfg(not(test))] const CLEAN_BAD_BLOCKS_BEFORE_REL_HEIGHT: u64 = 0; - lmdb_replace(txn, &self.bad_blocks, hash.deref(), &(height, reason))?; + lmdb_replace(txn, &self.bad_blocks, hash.deref(), &(height, reason), None)?; // Clean up bad blocks that are far from the tip let metadata = fetch_metadata(txn, &self.metadata_db)?; let deleted_before_height = metadata @@ -1760,10 +1773,35 @@ impl BlockchainBackend for LMDBDatabase { return Ok(()); } + // Ensure there will be enough space in the database to insert the block and replace the SMT before it is + // attempted; this is more efficient than relying on an error if the LMDB environment map size was reached with + // the write operation, with cleanup, resize and re-try afterwards. + let block_operations = txn.operations().iter().filter(|op| { + matches!(op, WriteOperation::InsertOrphanBlock { .. 
}) || + matches!(op, WriteOperation::InsertTipBlockBody { .. }) || + matches!(op, WriteOperation::InsertChainOrphanBlock { .. }) + }); + let count = block_operations.count(); + if count > 0 { + let (mapsize, size_used_bytes, size_left_bytes) = LMDBStore::get_stats(&self.env)?; + trace!( + target: LOG_TARGET, + "[apply_db_transaction] Block insert operations: {}, mapsize: {} MB, used: {} MB, remaining: {} MB", + count, mapsize / BYTES_PER_MB, size_used_bytes / BYTES_PER_MB, size_left_bytes / BYTES_PER_MB + ); + unsafe { + LMDBStore::resize_if_required( + &self.env, + &self.env_config, + Some(max(self.env_config.grow_size_bytes(), 128 * BYTES_PER_MB)), + )?; + } + } + let mark = Instant::now(); - // Resize this many times before assuming something is not right - const MAX_RESIZES: usize = 5; - for i in 0..MAX_RESIZES { + // Resize this many times before assuming something is not right (up to 1 GB) + let max_resizes = 1024 * BYTES_PER_MB / self.env_config.grow_size_bytes(); + for i in 0..max_resizes { let num_operations = txn.operations().len(); match self.apply_db_transaction(&txn) { Ok(_) => { @@ -1776,7 +1814,7 @@ impl BlockchainBackend for LMDBDatabase { return Ok(()); }, - Err(ChainStorageError::DbResizeRequired) => { + Err(ChainStorageError::DbResizeRequired(size_that_could_not_be_written)) => { info!( target: LOG_TARGET, "Database resize required (resized {} time(s) in this transaction)", @@ -1787,7 +1825,7 @@ impl BlockchainBackend for LMDBDatabase { // BlockchainDatabase, so we know there are no other threads taking out LMDB transactions when this // is called. 
unsafe { - LMDBStore::resize(&self.env, &self.env_config)?; + LMDBStore::resize(&self.env, &self.env_config, size_that_could_not_be_written)?; } }, Err(e) => { @@ -2021,19 +2059,19 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_outputs_in_block_with_spend_state( &self, - previous_header_hash: &HashOutput, - spend_status_at_header: Option, + header_hash: &HashOutput, + spend_status_at_header: Option<&HashOutput>, ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; let mut outputs: Vec<(TransactionOutput, bool)> = - lmdb_fetch_matching_after::(&txn, &self.utxos_db, previous_header_hash.deref())? + lmdb_fetch_matching_after::(&txn, &self.utxos_db, header_hash.deref())? .into_iter() .map(|row| (row.output, false)) .collect(); if let Some(header_hash) = spend_status_at_header { let header_height = - self.fetch_height_from_hash(&txn, &header_hash)? + self.fetch_height_from_hash(&txn, header_hash)? .ok_or(ChainStorageError::ValueNotFound { entity: "Header", field: "hash", @@ -2518,18 +2556,41 @@ impl BlockchainBackend for LMDBDatabase { Ok(result) } - fn fetch_tip_smt(&self) -> Result { - let txn = self.read_transaction()?; - let k = MetadataKey::TipSmt; - let val: Option = lmdb_get(&txn, &self.tip_utxo_smt, &k.as_u32())?; - match val { - Some(smt) => Ok(smt), - _ => Err(ChainStorageError::ValueNotFound { - entity: "TipSmt", - field: "TipSmt", - value: "".to_string(), - }), + fn calculate_tip_smt(&self) -> Result { + let start = Instant::now(); + let metadata = self.fetch_chain_metadata()?; + let mut smt = OutputSmt::new(); + trace!( + target: LOG_TARGET, + "Calculating new smt at height: #{}", + metadata.pruned_height(), + ); + for height in 0..=metadata.best_block_height() { + let header = self.fetch_chain_header_by_height(height)?; + let outputs = + self.fetch_outputs_in_block_with_spend_state(header.hash(), Some(metadata.best_block_hash()))?; + for output in outputs { + if !output.1 && !output.0.is_burned() { + let smt_key = 
NodeKey::try_from(output.0.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.0.smt_hash(header.header().height).as_slice())?; + if let Err(e) = smt.insert(smt_key, smt_node) { + error!( + target: LOG_TARGET, + "Output commitment({}) already in SMT", + output.0.commitment.to_hex(), + ); + return Err(e.into()); + } + } + } } + trace!( + target: LOG_TARGET, + "Finished calculating new smt (size: {}), took: #{}s", + smt.size(), + start.elapsed().as_millis() + ); + Ok(smt) } } @@ -2655,7 +2716,6 @@ enum MetadataKey { HorizonData, BestBlockTimestamp, MigrationVersion, - TipSmt, } impl MetadataKey { @@ -2676,7 +2736,6 @@ impl fmt::Display for MetadataKey { MetadataKey::HorizonData => write!(f, "Database info"), MetadataKey::BestBlockTimestamp => write!(f, "Chain tip block timestamp"), MetadataKey::MigrationVersion => write!(f, "Migration version"), - MetadataKey::TipSmt => write!(f, "Chain tip Sparse Merkle Tree version"), } } } @@ -2734,6 +2793,7 @@ fn run_migrations(db: &LMDBDatabase) -> Result<(), ChainStorageError> { &db.metadata_db, &k.as_u32(), &MetadataValue::MigrationVersion(MIGRATION_VERSION), + None, )?; txn.commit()?; } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index bc816adfac..a7fa5e4c01 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -428,7 +428,7 @@ mod fetch_total_size_stats { let _block_and_outputs = add_many_chained_blocks(2, &db, &key_manager).await; let stats = db.fetch_total_size_stats().unwrap(); assert_eq!( - stats.sizes().iter().find(|s| s.name == "utxos_db").unwrap().num_entries, + stats.sizes().iter().find(|s| s.name == "utxos").unwrap().num_entries, genesis_output_count + 2 ); } diff --git a/base_layer/core/src/covenants/decoder.rs b/base_layer/core/src/covenants/decoder.rs index c4b7e54386..9a8aef6ec0 100644 --- 
a/base_layer/core/src/covenants/decoder.rs +++ b/base_layer/core/src/covenants/decoder.rs @@ -95,6 +95,7 @@ pub(super) trait CovenantReadExt: io::Read { impl CovenantReadExt for R { /// Reads next byte code + #[allow(clippy::unused_io_amount)] fn read_next_byte_code(&mut self) -> Result, io::Error> { let mut buf = [0u8; 1]; loop { diff --git a/base_layer/core/src/mempool/priority/prioritized_transaction.rs b/base_layer/core/src/mempool/priority/prioritized_transaction.rs index d656a8cfd6..0c78db88b7 100644 --- a/base_layer/core/src/mempool/priority/prioritized_transaction.rs +++ b/base_layer/core/src/mempool/priority/prioritized_transaction.rs @@ -42,7 +42,13 @@ pub struct FeePriority(Vec); impl FeePriority { pub fn new(transaction: &Transaction, insert_epoch: u64, weight: u64) -> Result { - let fee_per_byte = transaction.body.get_total_fee()?.as_u64().saturating_mul(1000) / weight; + let fee_per_byte = transaction + .body + .get_total_fee()? + .as_u64() + .saturating_mul(1000) + .checked_div(weight) + .ok_or(TransactionError::ZeroWeight)?; // Big-endian used here, the MSB is in the starting index. The ordering for Vec is taken from elements left // to right and the unconfirmed pool expects the lowest priority to be sorted lowest to highest in the // BTreeMap @@ -95,7 +101,13 @@ impl PrioritizedTransaction { Ok(Self { key, priority: FeePriority::new(&transaction, insert_epoch, weight)?, - fee_per_byte: transaction.body.get_total_fee()?.as_u64().saturating_mul(1000) / weight, + fee_per_byte: transaction + .body + .get_total_fee()? 
+ .as_u64() + .saturating_mul(1000) + .checked_div(weight) + .ok_or(TransactionError::ZeroWeight)?, weight, transaction, dependent_output_hashes: dependent_outputs.unwrap_or_default(), @@ -162,4 +174,37 @@ mod tests { assert!(p2 > p1); } + + #[test] + fn prioritized_from_empty_transaction() { + let weighting = TransactionWeight::latest(); + match PrioritizedTransaction::new( + 0, + &weighting, + Arc::new(Transaction::new( + vec![], + vec![], + vec![], + Default::default(), + Default::default(), + )), + None, + ) { + Ok(_) => panic!("Empty transaction should not be valid"), + Err(e) => assert_eq!(e, TransactionError::ZeroWeight), + } + } + + #[test] + fn fee_priority_with_zero_weight() { + let weight = 0; + match FeePriority::new( + &Transaction::new(vec![], vec![], vec![], Default::default(), Default::default()), + SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(), + weight, + ) { + Ok(_) => panic!("Empty transaction should not be valid"), + Err(e) => assert_eq!(e, TransactionError::ZeroWeight), + } + } } diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 400ae27e6b..255401feda 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -25,7 +25,7 @@ use std::{ fs, ops::Deref, path::{Path, PathBuf}, - sync::Arc, + sync::{Arc, RwLock}, }; use tari_common::configuration::Network; @@ -108,20 +108,23 @@ pub fn create_custom_blockchain(rules: ConsensusManager) -> BlockchainDatabase, + smt: Arc>, ) -> BlockchainDatabase { - create_store_with_consensus_and_validators_and_config(rules, validators, BlockchainDatabaseConfig::default()) + create_store_with_consensus_and_validators_and_config(rules, validators, BlockchainDatabaseConfig::default(), smt) } pub fn create_store_with_consensus_and_validators_and_config( rules: ConsensusManager, validators: Validators, config: BlockchainDatabaseConfig, + smt: Arc>, ) -> BlockchainDatabase { let 
backend = create_test_db(); BlockchainDatabase::new( @@ -130,6 +133,7 @@ pub fn create_store_with_consensus_and_validators_and_config( validators, config, DifficultyCalculator::new(rules, Default::default()), + smt, ) .unwrap() } @@ -141,7 +145,8 @@ pub fn create_store_with_consensus(rules: ConsensusManager) -> BlockchainDatabas MockValidator::new(true), BlockBodyInternalConsistencyValidator::new(rules.clone(), false, factories), ); - create_store_with_consensus_and_validators(rules, validators) + let smt = Arc::new(RwLock::new(OutputSmt::new())); + create_store_with_consensus_and_validators(rules, validators, smt) } pub fn create_test_blockchain_db() -> BlockchainDatabase { let rules = create_consensus_rules(); @@ -280,7 +285,7 @@ impl BlockchainBackend for TempDatabase { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option<&HashOutput>, ) -> Result, ChainStorageError> { self.db .as_ref() @@ -434,8 +439,8 @@ impl BlockchainBackend for TempDatabase { .fetch_template_registrations(start_height, end_height) } - fn fetch_tip_smt(&self) -> Result { - self.db.as_ref().unwrap().fetch_tip_smt() + fn calculate_tip_smt(&self) -> Result { + self.db.as_ref().unwrap().calculate_tip_smt() } } @@ -498,8 +503,10 @@ pub async fn create_main_chain>( .try_into_chain_block() .map(Arc::new) .unwrap(); - let mut smt = db.fetch_tip_smt().unwrap(); - let (names, chain) = create_chained_blocks(blocks, genesis_block, &mut smt).await; + let (names, chain) = { + let mut smt = db.smt_read_access().unwrap().clone(); + create_chained_blocks(blocks, genesis_block, &mut smt).await + }; names.iter().for_each(|name| { let block = chain.get(name).unwrap(); db.add_block(block.to_arc_block()).unwrap(); @@ -569,7 +576,7 @@ impl TestBlockchain { wallet_payment_address, range_proof_type: RangeProofType::BulletProofPlus, }; - let smt = blockchain.db.fetch_tip_smt().unwrap(); + let smt = 
blockchain.db.smt_read_access().unwrap().clone(); blockchain.chain.push(("GB", genesis, smt)); blockchain @@ -606,9 +613,9 @@ impl TestBlockchain { Ok(()) } - pub async fn with_validators(validators: Validators) -> Self { + pub async fn with_validators(validators: Validators, smt: Arc>) -> Self { let rules = ConsensusManager::builder(Network::LocalNet).build().unwrap(); - let db = create_store_with_consensus_and_validators(rules.clone(), validators); + let db = create_store_with_consensus_and_validators(rules.clone(), validators, smt); Self::new(db, rules).await } @@ -648,7 +655,7 @@ impl TestBlockchain { block: Arc, ) -> Result { let result = self.db.add_block(block.to_arc_block())?; - let smt = self.db.fetch_tip_smt().unwrap(); + let smt = self.db.smt().read().unwrap().clone(); self.chain.push((name, block, smt)); Ok(result) } diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index cb2f27087f..ead4800baa 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -729,7 +729,7 @@ mod test { TransactionKeyManagerInterface, TxoStage, }, - transaction_components::{RangeProofType, TransactionKernelVersion}, + transaction_components::{KernelBuilder, RangeProofType, TransactionKernelVersion}, }; #[tokio::test] @@ -863,4 +863,134 @@ mod test { ) .unwrap(); } + + #[tokio::test] + #[allow(clippy::too_many_lines)] + #[allow(clippy::identity_op)] + async fn multi_coinbase_amount() { + // We construct two txs both valid with a single coinbase. We then add a duplicate coinbase utxo to the one, and + // a duplicate coinbase kernel to the other one. + let (builder, rules, factories, key_manager) = get_builder(); + let p = TestParams::new(&key_manager).await; + // We just want some small amount here. 
+ let missing_fee = rules.emission_schedule().block_reward(4200000) + (2 * uT); + let wallet_payment_address = TariAddress::default(); + let builder = builder + .with_block_height(42) + .with_fees(1 * uT) + .with_spend_key_id(p.spend_key_id.clone()) + .with_encryption_key_id(TariKeyId::default()) + .with_sender_offset_key_id(p.sender_offset_key_id.clone()) + .with_script_key_id(p.script_key_id.clone()) + .with_script(one_sided_payment_script(wallet_payment_address.public_key())) + .with_range_proof_type(RangeProofType::RevealedValue); + let (tx1, wo1) = builder + .build(rules.consensus_constants(0), rules.emission_schedule()) + .await + .unwrap(); + + // we calculate a duplicate tx here so that we can have a coinbase with the correct fee amount + let block_reward = rules.emission_schedule().block_reward(42) + missing_fee; + let builder = CoinbaseBuilder::new(key_manager.clone()); + let builder = builder + .with_block_height(4200000) + .with_fees(1 * uT) + .with_spend_key_id(p.spend_key_id.clone()) + .with_encryption_key_id(TariKeyId::default()) + .with_sender_offset_key_id(p.sender_offset_key_id) + .with_script_key_id(p.script_key_id) + .with_script(one_sided_payment_script(wallet_payment_address.public_key())) + .with_range_proof_type(RangeProofType::RevealedValue); + let (tx2, wo2) = builder + .build(rules.consensus_constants(0), rules.emission_schedule()) + .await + .unwrap(); + + let coinbase1 = tx1.body.outputs()[0].clone(); + let coinbase2 = tx2.body.outputs()[0].clone(); + let mut kernel_1 = tx1.body.kernels()[0].clone(); + let kernel_2 = tx2.body.kernels()[0].clone(); + let excess = &kernel_1.excess + &kernel_2.excess; + kernel_1.excess = &kernel_1.excess + &kernel_2.excess; + kernel_1.excess_sig = &kernel_1.excess_sig + &kernel_2.excess_sig; + let mut body1 = AggregateBody::new(Vec::new(), vec![coinbase1, coinbase2], vec![kernel_1.clone()]); + body1.sort(); + + body1 + .check_coinbase_output( + block_reward, + 
rules.consensus_constants(0).coinbase_min_maturity(), + &factories, + 42, + ) + .unwrap(); + body1.verify_kernel_signatures().unwrap_err(); + + // lets create a new kernel with a correct signature + let (new_nonce1, nonce1) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .unwrap(); + let (new_nonce2, nonce2) = key_manager + .get_next_key(TransactionKeyManagerBranch::KernelNonce.get_branch_key()) + .await + .unwrap(); + let nonce = &nonce1 + &nonce2; + let kernel_message = TransactionKernel::build_kernel_signature_message( + &TransactionKernelVersion::get_current_version(), + kernel_1.fee, + kernel_1.lock_height, + &kernel_1.features, + &None, + ); + + let mut kernel_signature = key_manager + .get_partial_txo_kernel_signature( + &wo1.spending_key_id, + &new_nonce1, + &nonce, + excess.as_public_key(), + &TransactionKernelVersion::get_current_version(), + &kernel_message, + &kernel_1.features, + TxoStage::Output, + ) + .await + .unwrap(); + kernel_signature = &kernel_signature + + &key_manager + .get_partial_txo_kernel_signature( + &wo2.spending_key_id, + &new_nonce2, + &nonce, + excess.as_public_key(), + &TransactionKernelVersion::get_current_version(), + &kernel_message, + &kernel_1.features, + TxoStage::Output, + ) + .await + .unwrap(); + let kernel_new = KernelBuilder::new() + .with_fee(0.into()) + .with_features(kernel_1.features) + .with_lock_height(kernel_1.lock_height) + .with_excess(&excess) + .with_signature(kernel_signature) + .build() + .unwrap(); + + let mut body2 = AggregateBody::new(Vec::new(), body1.outputs().clone(), vec![kernel_new]); + body2.sort(); + + body2 + .check_coinbase_output( + block_reward, + rules.consensus_constants(0).coinbase_min_maturity(), + &factories, + 42, + ) + .unwrap(); + body2.verify_kernel_signatures().unwrap(); + } } diff --git a/base_layer/core/src/transactions/transaction_components/error.rs b/base_layer/core/src/transactions/transaction_components/error.rs index 
f2d4b84b34..ab038bffa5 100644 --- a/base_layer/core/src/transactions/transaction_components/error.rs +++ b/base_layer/core/src/transactions/transaction_components/error.rs @@ -75,6 +75,8 @@ pub enum TransactionError { EncryptedDataError(String), #[error("Ledger device error: {0}")] LedgerDeviceError(#[from] LedgerDeviceError), + #[error("Transaction has a zero weight, not possible")] + ZeroWeight, } impl From for TransactionError { diff --git a/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs b/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs index 3f482f0c05..85d60c166f 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_kernel.rs @@ -51,7 +51,7 @@ use crate::{ /// [Mimblewimble TLU post](https://tlu.tarilabs.com/protocols/mimblewimble-1/sources/PITCHME.link.html?highlight=mimblewimble#mimblewimble). /// The kernel also tracks other transaction metadata, such as the lock height for the transaction (i.e. the earliest /// this transaction can be mined) and the transaction fee, in cleartext. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Default)] pub struct TransactionKernel { pub version: TransactionKernelVersion, /// Options for a kernel's structure or use diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index 8c21d4c0a1..a96691dd75 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -502,7 +502,7 @@ where KM: TransactionKeyManagerInterface return self.build_err("Fee is less than the minimum"); } - let change_output_pair = match { change_output } { + let change_output_pair = match change_output { Some((output, sender_offset_key_id)) => { if self.sender_custom_outputs.len() >= MAX_TRANSACTION_OUTPUTS { return self.build_err("Too many outputs in transaction"); diff --git a/base_layer/core/src/transactions/weight.rs b/base_layer/core/src/transactions/weight.rs index 8d9fa9d87a..72f91f0409 100644 --- a/base_layer/core/src/transactions/weight.rs +++ b/base_layer/core/src/transactions/weight.rs @@ -163,4 +163,11 @@ mod test { ); } } + + #[test] + fn empty_body_weight() { + let weighting = TransactionWeight::latest(); + let body = AggregateBody::empty(); + assert_eq!(weighting.calculate_body(&body).unwrap(), 0); + } } diff --git a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs index 1a48727ff6..1ffdb5be0b 100644 --- a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs +++ b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs @@ -102,25 +102,41 @@ fn validate_input_not_pruned( let mut 
inputs: Vec = body.inputs().clone(); for input in &mut inputs { if input.is_compact() { - let output_mined_info = db - .fetch_output(&input.output_hash())? - .ok_or(ValidationError::UnknownInput)?; + let output = match db.fetch_output(&input.output_hash()) { + Ok(val) => match val { + Some(output_mined_info) => output_mined_info.output, + None => { + let input_output_hash = input.output_hash(); + if let Some(found) = body.outputs().iter().find(|o| o.hash() == input_output_hash) { + found.clone() + } else { + warn!( + target: LOG_TARGET, + "Input not found in database or block, commitment: {}, hash: {}", + input.commitment()?.to_hex(), input_output_hash.to_hex() + ); + return Err(ValidationError::UnknownInput); + } + }, + }, + Err(e) => return Err(ValidationError::from(e)), + }; - let rp_hash = match output_mined_info.output.proof { + let rp_hash = match output.proof { Some(proof) => proof.hash(), None => FixedHash::zero(), }; input.add_output_data( - output_mined_info.output.version, - output_mined_info.output.features, - output_mined_info.output.commitment, - output_mined_info.output.script, - output_mined_info.output.sender_offset_public_key, - output_mined_info.output.covenant, - output_mined_info.output.encrypted_data, - output_mined_info.output.metadata_signature, + output.version, + output.features, + output.commitment, + output.script, + output.sender_offset_public_key, + output.covenant, + output.encrypted_data, + output.metadata_signature, rp_hash, - output_mined_info.output.minimum_value_promise, + output.minimum_value_promise, ); } } @@ -172,11 +188,16 @@ fn check_inputs_are_utxos(db: &B, body: &AggregateBody) -> } let output_hashes = output_hashes.as_ref().unwrap(); - let output_hash = input.output_hash(); - if output_hashes.iter().any(|output| output == &output_hash) { + let input_output_hash = input.output_hash(); + if output_hashes.iter().any(|val| val == &input_output_hash) { continue; } - not_found_inputs.push(output_hash); + warn!( + target: 
LOG_TARGET, + "Input not found in database, commitment: {}, hash: {}", + input.commitment()?.to_hex(), input_output_hash.to_hex() + ); + not_found_inputs.push(input_output_hash); }, Err(err) => { return Err(err); diff --git a/base_layer/core/src/validation/block_body/block_body_full_validator.rs b/base_layer/core/src/validation/block_body/block_body_full_validator.rs index c16924fc25..5e02ced3e9 100644 --- a/base_layer/core/src/validation/block_body/block_body_full_validator.rs +++ b/base_layer/core/src/validation/block_body/block_body_full_validator.rs @@ -20,13 +20,16 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::sync::{Arc, RwLock}; + +use log::error; use tari_common_types::chain_metadata::ChainMetadata; use tari_utilities::hex::Hex; use super::BlockBodyInternalConsistencyValidator; use crate::{ blocks::{Block, ChainBlock}, - chain_storage::{self, BlockchainBackend}, + chain_storage::{self, BlockchainBackend, ChainStorageError}, consensus::ConsensusManager, transactions::CryptoFactories, validation::{ @@ -36,8 +39,11 @@ use crate::{ CandidateBlockValidator, ValidationError, }, + OutputSmt, }; +const LOG_TARGET: &str = "c::val::block_body_full_validator"; + pub struct BlockBodyFullValidator { consensus_manager: ConsensusManager, block_internal_validator: BlockBodyInternalConsistencyValidator, @@ -62,6 +68,7 @@ impl BlockBodyFullValidator { backend: &B, block: &Block, metadata_option: Option<&ChainMetadata>, + smt: Arc>, ) -> Result { if let Some(metadata) = metadata_option { validate_block_metadata(block, metadata)?; @@ -78,8 +85,15 @@ impl BlockBodyFullValidator { // validate the internal consistency of the block body self.block_internal_validator.validate(&block)?; - // validate the merkle mountain range roots - let mmr_roots = chain_storage::calculate_mmr_roots(backend, &self.consensus_manager, &block)?; 
+ // validate the merkle mountain range roots+ + let mut output_smt = smt.write().map_err(|e| { + error!( + target: LOG_TARGET, + "Validator could not get a write lock on the smt {:?}", e + ); + ChainStorageError::AccessError("write lock on smt".into()) + })?; + let mmr_roots = chain_storage::calculate_mmr_roots(backend, &self.consensus_manager, &block, &mut output_smt)?; check_mmr_roots(&block.header, &mmr_roots)?; Ok(block) @@ -92,15 +106,16 @@ impl CandidateBlockValidator for BlockBodyFullValidator backend: &B, block: &ChainBlock, metadata: &ChainMetadata, + smt: Arc>, ) -> Result<(), ValidationError> { - self.validate(backend, block.block(), Some(metadata))?; + self.validate(backend, block.block(), Some(metadata), smt)?; Ok(()) } } impl BlockBodyValidator for BlockBodyFullValidator { - fn validate_body(&self, backend: &B, block: &Block) -> Result { - self.validate(backend, block, None) + fn validate_body(&self, backend: &B, block: &Block, smt: Arc>) -> Result { + self.validate(backend, block, None, smt) } } diff --git a/base_layer/core/src/validation/block_body/test.rs b/base_layer/core/src/validation/block_body/test.rs index b1a706a521..a8fa2290e7 100644 --- a/base_layer/core/src/validation/block_body/test.rs +++ b/base_layer/core/src/validation/block_body/test.rs @@ -99,7 +99,8 @@ async fn it_passes_if_large_output_block_is_valid() { let txn = blockchain.db().db_read_access().unwrap(); let start = Instant::now(); - assert!(validator.validate_body(&*txn, &block).is_ok()); + let smt = blockchain.db().smt().clone(); + assert!(validator.validate_body(&*txn, &block, smt).is_ok()); let finished = start.elapsed(); // this here here for benchmarking purposes. 
// we can extrapolate full block validation by multiplying the time by 4.6, this we get from the max_weight /weight @@ -133,7 +134,8 @@ async fn it_validates_when_a_coinbase_is_spent() { block.header.validator_node_size = mmr_roots.validator_node_size; let txn = blockchain.db().db_read_access().unwrap(); - assert!(validator.validate_body(&*txn, &block).is_ok()); + let smt = blockchain.db().smt().clone(); + assert!(validator.validate_body(&*txn, &block, smt).is_ok()); } #[tokio::test] @@ -175,7 +177,8 @@ async fn it_passes_if_large_block_is_valid() { let txn = blockchain.db().db_read_access().unwrap(); let start = Instant::now(); - validator.validate_body(&*txn, &block).unwrap(); + let smt = blockchain.db().smt(); + validator.validate_body(&*txn, &block, smt).unwrap(); // assert!(validator.validate_body(&*txn, &block).is_ok()); let finished = start.elapsed(); // this here here for benchmarking purposes. @@ -203,7 +206,8 @@ async fn it_passes_if_block_is_valid() { block.header.validator_node_size = mmr_roots.validator_node_size; let txn = blockchain.db().db_read_access().unwrap(); - assert!(validator.validate_body(&*txn, &block).is_ok()); + let smt = blockchain.db().smt(); + assert!(validator.validate_body(&*txn, &block, smt).is_ok()); } #[tokio::test] @@ -214,7 +218,8 @@ async fn it_checks_the_coinbase_reward() { .create_chained_block(block_spec!("A", parent: "GB", reward: 10 * T, )) .await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); println!("err {:?}", err); assert!(matches!( err, @@ -256,9 +261,9 @@ async fn it_allows_multiple_coinbases() { .create_unmined_block(block_spec!("A2", parent: "GB", skip_coinbase: true,)) .await; let block = blockchain.mine_block("GB", block, Difficulty::min()); - + let smt = blockchain.db().smt(); let txn = 
blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!( err, ValidationError::BlockError(BlockValidationError::TransactionError(TransactionError::NoCoinbase)) @@ -288,7 +293,8 @@ async fn it_checks_duplicate_kernel() { ) .await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!(err, ValidationError::DuplicateKernelError(_))); } @@ -321,7 +327,8 @@ async fn it_checks_double_spends() { ) .await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!(err, ValidationError::ContainsSTxO)); } @@ -342,7 +349,8 @@ async fn it_checks_input_maturity() { ) .await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!( err, ValidationError::TransactionError(TransactionError::InputMaturity) @@ -370,7 +378,8 @@ async fn it_checks_txo_sort_order() { let block = blockchain.mine_block("A", block, Difficulty::min()); let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!(err, ValidationError::UnsortedOrDuplicateOutput)); } @@ -396,7 +405,8 @@ async fn it_limits_the_script_byte_size() { let (block, _) = blockchain.create_next_tip(block_spec!("B", 
transactions: txs)).await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!(err, ValidationError::TariScriptExceedsMaxSize { .. })); } @@ -421,7 +431,8 @@ async fn it_rejects_invalid_input_metadata() { let (block, _) = blockchain.create_next_tip(block_spec!("B", transactions: txs)).await; let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, block.block()).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator.validate_body(&*txn, block.block(), smt).unwrap_err(); assert!(matches!(err, ValidationError::UnknownInputs(_))); } @@ -449,8 +460,9 @@ async fn it_rejects_zero_conf_double_spends() { let (unmined, _) = blockchain .create_unmined_block(block_spec!("2", parent: "1", transactions: transactions)) .await; + let smt = blockchain.db().smt(); let txn = blockchain.db().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, &unmined).unwrap_err(); + let err = validator.validate_body(&*txn, &unmined, smt).unwrap_err(); assert!(matches!(err, ValidationError::UnsortedOrDuplicateInput)); } @@ -484,7 +496,10 @@ mod body_only { let metadata = blockchain.db().get_chain_metadata().unwrap(); let db = blockchain.db().db_read_access().unwrap(); - let err = validator.validate(&*db, block.block(), Some(&metadata)).unwrap_err(); + let smt = blockchain.db().smt(); + let err = validator + .validate(&*db, block.block(), Some(&metadata), smt) + .unwrap_err(); assert!(matches!(err, ValidationError::UnknownInputs(_))); } } diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index cc6706fa5d..4ceac26a70 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -163,7 +163,8 @@ pub fn is_all_unique_and_sorted<'a, I: 
IntoIterator, T: PartialOrd true } -/// This function checks that an input is a valid spendable UTXO +/// This function checks that an input is a valid spendable UTXO in the database. It cannot confirm +/// zero confirmation transactions. pub fn check_input_is_utxo(db: &B, input: &TransactionInput) -> Result<(), ValidationError> { let output_hash = input.output_hash(); if let Some(utxo_hash) = db.fetch_unspent_output_hash_by_commitment(input.commitment()?)? { @@ -203,7 +204,7 @@ pub fn check_input_is_utxo(db: &B, input: &TransactionInpu warn!( target: LOG_TARGET, - "Validation failed due to input: {} which does not exist yet", input + "Input ({}, {}) does not exist in the database yet", input.commitment()?.to_hex(), output_hash.to_hex() ); Err(ValidationError::UnknownInput) } diff --git a/base_layer/core/src/validation/mocks.rs b/base_layer/core/src/validation/mocks.rs index 83bcf28006..e19a801cfa 100644 --- a/base_layer/core/src/validation/mocks.rs +++ b/base_layer/core/src/validation/mocks.rs @@ -23,6 +23,7 @@ use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, + RwLock, }; use tari_common_types::{chain_metadata::ChainMetadata, types::Commitment}; @@ -42,6 +43,7 @@ use crate::{ test_helpers::create_consensus_rules, transactions::transaction_components::Transaction, validation::{error::ValidationError, DifficultyCalculator, FinalHorizonStateValidation}, + OutputSmt, }; #[derive(Clone)] @@ -70,7 +72,7 @@ impl MockValidator { } impl BlockBodyValidator for MockValidator { - fn validate_body(&self, _: &B, block: &Block) -> Result { + fn validate_body(&self, _: &B, block: &Block, _: Arc>) -> Result { if self.is_valid.load(Ordering::SeqCst) { Ok(block.clone()) } else { @@ -82,7 +84,13 @@ impl BlockBodyValidator for MockValidator { } impl CandidateBlockValidator for MockValidator { - fn validate_body_with_metadata(&self, _: &B, _: &ChainBlock, _: &ChainMetadata) -> Result<(), ValidationError> { + fn validate_body_with_metadata( + &self, + _: &B, + _: &ChainBlock, + _: 
&ChainMetadata, + _: Arc>, + ) -> Result<(), ValidationError> { if self.is_valid.load(Ordering::SeqCst) { Ok(()) } else { diff --git a/base_layer/core/src/validation/traits.rs b/base_layer/core/src/validation/traits.rs index d4cdd97244..87e14e1125 100644 --- a/base_layer/core/src/validation/traits.rs +++ b/base_layer/core/src/validation/traits.rs @@ -1,9 +1,12 @@ +use std::sync::{Arc, RwLock}; + // Copyright 2019. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the +// following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the @@ -12,13 +15,14 @@ // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +// DAMAGE. use tari_common_types::{chain_metadata::ChainMetadata, types::Commitment}; use tari_utilities::epoch_time::EpochTime; @@ -28,12 +32,13 @@ use crate::{ proof_of_work::{AchievedTargetDifficulty, Difficulty}, transactions::transaction_components::Transaction, validation::error::ValidationError, + OutputSmt, }; /// A validator that determines if a block body is valid, assuming that the header has already been /// validated pub trait BlockBodyValidator: Send + Sync { - fn validate_body(&self, backend: &B, block: &Block) -> Result; + fn validate_body(&self, backend: &B, block: &Block, smt: Arc>) -> Result; } /// A validator that validates a body after it has been determined to be a valid orphan @@ -43,6 +48,7 @@ pub trait CandidateBlockValidator: Send + Sync { backend: &B, block: &ChainBlock, metadata: &ChainMetadata, + smt: Arc>, ) -> Result<(), ValidationError>; } diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index 207373969c..c22e65ac18 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 
WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{path::Path, sync::Arc, time::Duration}; +use std::{ + path::Path, + sync::{Arc, RwLock}, + time::Duration, +}; use rand::rngs::OsRng; use tari_common::configuration::Network; @@ -60,6 +64,7 @@ use tari_core::{ HeaderChainLinkedValidator, InternalConsistencyValidator, }, + OutputSmt, }; use tari_p2p::{ comms_connector::{pubsub_connector, InboundDomainConnector}, @@ -202,10 +207,12 @@ impl BaseNodeBuilder { let consensus_manager = self .consensus_manager .unwrap_or_else(|| ConsensusManagerBuilder::new(network).build().unwrap()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let blockchain_db = create_store_with_consensus_and_validators_and_config( consensus_manager.clone(), validators, blockchain_db_config, + smt.clone(), ); let mempool_validator = TransactionChainLinkedValidator::new(blockchain_db.clone(), consensus_manager.clone()); let mempool = Mempool::new( diff --git a/base_layer/core/tests/helpers/sample_blockchains.rs b/base_layer/core/tests/helpers/sample_blockchains.rs index 40c82fb429..0270c7d600 100644 --- a/base_layer/core/tests/helpers/sample_blockchains.rs +++ b/base_layer/core/tests/helpers/sample_blockchains.rs @@ -21,6 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// +use std::sync::{Arc, RwLock}; + use tari_common::configuration::Network; use tari_core::{ blocks::ChainBlock, @@ -34,6 +36,7 @@ use tari_core::{ }, txn_schema, validation::DifficultyCalculator, + OutputSmt, }; use crate::helpers::block_builders::{create_genesis_block, generate_new_block}; @@ -254,12 +257,14 @@ pub async fn create_new_blockchain_lmdb( .build() .unwrap(); let db = TempDatabase::new(); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let db = BlockchainDatabase::new( db, consensus_manager.clone(), validators, config, DifficultyCalculator::new(consensus_manager.clone(), Default::default()), + smt, ) .unwrap(); (db, vec![block0], vec![vec![output]], consensus_manager, key_manager) diff --git a/base_layer/core/tests/helpers/sync.rs b/base_layer/core/tests/helpers/sync.rs index 93a77d1fbc..8dace2f6d9 100644 --- a/base_layer/core/tests/helpers/sync.rs +++ b/base_layer/core/tests/helpers/sync.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, RwLock}, + time::Duration, +}; use tari_common::configuration::Network; use tari_common_types::types::HashOutput; @@ -55,6 +58,7 @@ use tari_core::{ }, txn_schema, validation::mocks::MockValidator, + OutputSmt, }; use tari_p2p::{services::liveness::LivenessConfig, P2pConfig}; use tari_shutdown::Shutdown; @@ -217,8 +221,14 @@ pub enum WhatToDelete { // Private helper function to setup a delete a block transaction. // Note: This private function will panic if the index is out of bounds - caller function's responsibility. 
-fn delete_block(txn: &mut DbTransaction, node: &NodeInterfaces, blocks: &[ChainBlock], index: usize) { - txn.delete_tip_block(*blocks[index].hash()); +fn delete_block( + txn: &mut DbTransaction, + node: &NodeInterfaces, + blocks: &[ChainBlock], + index: usize, + smt: Arc>, +) { + txn.delete_tip_block(*blocks[index].hash(), smt); txn.delete_orphan(*blocks[index].hash()); txn.set_best_block( blocks[index + 1].height(), @@ -235,6 +245,7 @@ pub fn delete_some_blocks_and_headers( instruction: WhatToDelete, node: &NodeInterfaces, ) { + let smt = node.blockchain_db.smt().clone(); if blocks_with_anchor.is_empty() || blocks_with_anchor.len() < 2 { panic!("blocks must have at least 2 elements"); } @@ -244,11 +255,11 @@ pub fn delete_some_blocks_and_headers( let mut txn = DbTransaction::new(); match instruction { WhatToDelete::BlocksAndHeaders => { - delete_block(&mut txn, node, &blocks, i); + delete_block(&mut txn, node, &blocks, i, smt.clone()); txn.delete_header(blocks[i].height()); }, WhatToDelete::Blocks => { - delete_block(&mut txn, node, &blocks, i); + delete_block(&mut txn, node, &blocks, i, smt.clone()); }, WhatToDelete::Headers => { txn.delete_header(blocks[i].height()); diff --git a/base_layer/core/tests/tests/block_validation.rs b/base_layer/core/tests/tests/block_validation.rs index b07c94af1e..c0e4d50c44 100644 --- a/base_layer/core/tests/tests/block_validation.rs +++ b/base_layer/core/tests/tests/block_validation.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{iter, sync::Arc}; +use std::{ + iter, + sync::{Arc, RwLock}, +}; use borsh::BorshSerialize; use monero::{blockdata::block::Block as MoneroBlock, consensus::Encodable}; @@ -70,6 +73,7 @@ use tari_core::{ InternalConsistencyValidator, ValidationError, }, + OutputSmt, }; use tari_key_manager::key_manager_service::KeyManagerInterface; use tari_script::{inputs, script}; @@ -122,9 +126,11 @@ async fn test_monero_blocks() { let gen_hash = *cm.get_genesis_block().hash(); let difficulty_calculator = DifficultyCalculator::new(cm.clone(), RandomXFactory::default()); let header_validator = HeaderFullValidator::new(cm.clone(), difficulty_calculator); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let db = create_store_with_consensus_and_validators( cm.clone(), Validators::new(MockValidator::new(true), header_validator, MockValidator::new(true)), + smt, ); let block_0 = db.fetch_block(0, true).unwrap().try_into_chain_block().unwrap(); let (block_1_t, _) = chain_block_with_new_coinbase(&block_0, vec![], &cm, None, &key_manager).await; @@ -320,7 +326,8 @@ async fn inputs_are_not_malleable() { let validator = BlockBodyFullValidator::new(blockchain.consensus_manager().clone(), true); let txn = blockchain.store().db_read_access().unwrap(); - let err = validator.validate_body(&*txn, &block).unwrap_err(); + let smt = blockchain.store().smt(); + let err = validator.validate_body(&*txn, &block, smt).unwrap_err(); // All validations pass, except the Input MMR. 
unpack_enum!(ValidationError::BlockError(err) = err); @@ -348,6 +355,7 @@ async fn test_orphan_validator() { let orphan_validator = BlockBodyInternalConsistencyValidator::new(rules.clone(), false, factories.clone()); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), true), HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()), @@ -359,6 +367,7 @@ async fn test_orphan_validator() { validators, BlockchainDatabaseConfig::default(), difficulty_calculator, + smt, ) .unwrap(); // we have created the blockchain, lets create a second valid block @@ -490,6 +499,7 @@ async fn test_orphan_body_validation() { .unwrap(); let backend = create_test_db(); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let body_only_validator = BlockBodyFullValidator::new(rules.clone(), true); let header_validator = HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()); let validators = Validators::new( @@ -503,6 +513,7 @@ async fn test_orphan_body_validation() { validators, BlockchainDatabaseConfig::default(), DifficultyCalculator::new(rules.clone(), Default::default()), + smt, ) .unwrap(); // we have created the blockchain, lets create a second valid block @@ -543,9 +554,10 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); + let smt = db.smt().clone(); // this block should be okay assert!(body_only_validator - .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata) + .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata, smt) .is_ok()); // lets break the chain sequence @@ -614,8 +626,9 @@ OutputFeatures::default()), let 
chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); + let smt = db.smt().clone(); assert!(body_only_validator - .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata) + .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata, smt) .is_err()); // lets check duplicate txos @@ -646,8 +659,9 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); + let smt = db.smt(); assert!(body_only_validator - .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata) + .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata, smt) .is_err()); // check mmr roots @@ -676,8 +690,9 @@ OutputFeatures::default()), let chain_block = ChainBlock::try_construct(Arc::new(new_block), accumulated_data).unwrap(); let metadata = db.get_chain_metadata().unwrap(); + let smt = db.smt(); assert!(body_only_validator - .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata) + .validate_body_with_metadata(&*db.db_read_access().unwrap(), &chain_block, &metadata, smt) .is_err()); } @@ -707,6 +722,7 @@ async fn test_header_validation() { let backend = create_test_db(); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); let header_validator = HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), true), HeaderFullValidator::new(rules.clone(), difficulty_calculator.clone()), @@ -718,6 +734,7 @@ async fn test_header_validation() { validators, BlockchainDatabaseConfig::default(), difficulty_calculator, + smt, ) .unwrap(); // we have created the blockchain, lets create a second valid 
block @@ -820,6 +837,7 @@ async fn test_block_sync_body_validator() { .unwrap(); let backend = create_test_db(); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), true), HeaderFullValidator::new(rules.clone(), difficulty_calculator), @@ -832,6 +850,7 @@ async fn test_block_sync_body_validator() { validators, BlockchainDatabaseConfig::default(), DifficultyCalculator::new(rules.clone(), Default::default()), + smt, ) .unwrap(); let validator = BlockBodyFullValidator::new(rules.clone(), true); @@ -866,7 +885,8 @@ async fn test_block_sync_body_validator() { let err = { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err() + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err() }; assert!( matches!( @@ -885,7 +905,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap(); } // lets break the block weight @@ -910,7 +931,8 @@ async fn test_block_sync_body_validator() { let err = { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err() + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err() }; assert!( matches!( @@ -929,7 +951,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } // lets break the 
sorting @@ -941,7 +964,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } // lets have unknown inputs; @@ -978,7 +1002,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } // lets check duplicate txos @@ -992,7 +1017,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } // let break coinbase value @@ -1015,7 +1041,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } // let break coinbase lock height @@ -1038,7 +1065,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap(); } // lets break accounting @@ -1050,7 +1078,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); 
} // lets the mmr root @@ -1060,7 +1089,8 @@ async fn test_block_sync_body_validator() { { // `MutexGuard` cannot be held across an `await` point let txn = db.db_read_access().unwrap(); - validator.validate_body(&*txn, &new_block).unwrap_err(); + let smt = db.smt(); + validator.validate_body(&*txn, &new_block, smt).unwrap_err(); } } @@ -1098,6 +1128,7 @@ async fn add_block_with_large_block() { .unwrap(); let backend = create_test_db(); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), false), HeaderFullValidator::new(rules.clone(), difficulty_calculator), @@ -1110,6 +1141,7 @@ async fn add_block_with_large_block() { validators, BlockchainDatabaseConfig::default(), DifficultyCalculator::new(rules.clone(), Default::default()), + smt, ) .unwrap(); // lets make our big block (1 -> 5) * 12 @@ -1157,6 +1189,7 @@ async fn add_block_with_large_many_output_block() { .unwrap(); let backend = create_test_db(); let difficulty_calculator = DifficultyCalculator::new(rules.clone(), Default::default()); + let smt = Arc::new(RwLock::new(OutputSmt::new())); let validators = Validators::new( BlockBodyFullValidator::new(rules.clone(), false), HeaderFullValidator::new(rules.clone(), difficulty_calculator), @@ -1169,6 +1202,7 @@ async fn add_block_with_large_many_output_block() { validators, BlockchainDatabaseConfig::default(), DifficultyCalculator::new(rules.clone(), Default::default()), + smt, ) .unwrap(); // lets make our big block (1 -> 5) * 12 diff --git a/base_layer/core/tests/tests/node_comms_interface.rs b/base_layer/core/tests/tests/node_comms_interface.rs index 4480cfce56..576702189a 100644 --- a/base_layer/core/tests/tests/node_comms_interface.rs +++ b/base_layer/core/tests/tests/node_comms_interface.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use std::sync::{Arc, RwLock}; + use tari_common::configuration::Network; use tari_comms::test_utils::mocks::create_connectivity_mock; use tari_core::{ @@ -59,6 +61,7 @@ use tari_core::{ }, txn_schema, validation::{mocks::MockValidator, transaction::TransactionChainLinkedValidator}, + OutputSmt, }; use tari_key_manager::key_manager_service::KeyManagerInterface; use tari_script::{inputs, script, ExecutionStack}; @@ -441,7 +444,9 @@ async fn inbound_fetch_blocks_before_horizon_height() { pruning_interval: 1, ..Default::default() }; - let store = create_store_with_consensus_and_validators_and_config(consensus_manager.clone(), validators, config); + let smt = Arc::new(RwLock::new(OutputSmt::new())); + let store = + create_store_with_consensus_and_validators_and_config(consensus_manager.clone(), validators, config, smt); let mempool_validator = TransactionChainLinkedValidator::new(store.clone(), consensus_manager.clone()); let mempool = Mempool::new( MempoolConfig::default(), diff --git a/base_layer/key_manager/src/key_manager_service/storage/sqlite_db/key_manager_state.rs b/base_layer/key_manager/src/key_manager_service/storage/sqlite_db/key_manager_state.rs index 888744edb3..be569018c5 100644 --- a/base_layer/key_manager/src/key_manager_service/storage/sqlite_db/key_manager_state.rs +++ b/base_layer/key_manager/src/key_manager_service/storage/sqlite_db/key_manager_state.rs @@ -27,7 +27,7 @@ use chrono::{NaiveDateTime, Utc}; use diesel::{prelude::*, SqliteConnection}; use tari_common_sqlite::util::diesel_ext::ExpectedRowsExtension; use tari_common_types::encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce}; -use tari_utilities::Hidden; +use tari_utilities::{ByteArray, Hidden}; use crate::{ key_manager_service::{ @@ -147,9 +147,15 @@ impl KeyManagerStateSql { impl Encryptable for KeyManagerStateSql { fn domain(&self, field_name: &'static str) -> 
Vec { - [Self::KEY_MANAGER, self.branch_seed.as_bytes(), field_name.as_bytes()] - .concat() - .to_vec() + // Because there are two variable-length inputs in the concatenation, we prepend the length of the first + [ + Self::KEY_MANAGER, + (self.branch_seed.len() as u64).to_le_bytes().as_bytes(), + self.branch_seed.as_bytes(), + field_name.as_bytes(), + ] + .concat() + .to_vec() } fn encrypt(mut self, cipher: &XChaCha20Poly1305) -> Result { @@ -172,9 +178,15 @@ impl Encryptable for KeyManagerStateSql { impl Encryptable for NewKeyManagerStateSql { fn domain(&self, field_name: &'static str) -> Vec { - [Self::KEY_MANAGER, self.branch_seed.as_bytes(), field_name.as_bytes()] - .concat() - .to_vec() + // Because there are two variable-length inputs in the concatenation, we prepend the length of the first + [ + Self::KEY_MANAGER, + (self.branch_seed.len() as u64).to_le_bytes().as_bytes(), + self.branch_seed.as_bytes(), + field_name.as_bytes(), + ] + .concat() + .to_vec() } fn encrypt(mut self, cipher: &XChaCha20Poly1305) -> Result { diff --git a/base_layer/key_manager/src/lib.rs b/base_layer/key_manager/src/lib.rs index fc09da6a72..6118226c6e 100644 --- a/base_layer/key_manager/src/lib.rs +++ b/base_layer/key_manager/src/lib.rs @@ -139,7 +139,7 @@ mod tests { Hidden::hide("olá".to_string()), ]); - let vec_words = vec![ + let vec_words = [ "hi".to_string(), "niao".to_string(), "hola".to_string(), diff --git a/base_layer/mmr/src/backend.rs b/base_layer/mmr/src/backend.rs index 9d215743a2..69235daf01 100644 --- a/base_layer/mmr/src/backend.rs +++ b/base_layer/mmr/src/backend.rs @@ -83,7 +83,7 @@ impl ArrayLike for Vec { } fn get(&self, index: usize) -> Result, Self::Error> { - Ok((self as &[Self::Value]).get(index).map(Clone::clone)) + Ok((self as &[Self::Value]).get(index).cloned()) } fn clear(&mut self) -> Result<(), Self::Error> { diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index 0edcd4a27e..e04c1269f8 100644 --- a/base_layer/p2p/Cargo.toml +++ 
b/base_layer/p2p/Cargo.toml @@ -41,7 +41,7 @@ webpki = "0.22" [dev-dependencies] tari_test_utils = { path = "../../infrastructure/test_utils" } -config = "0.13.0" +config = "0.14.0" clap = "3.2" tempfile = "3.1.0" diff --git a/base_layer/service_framework/src/context/handles.rs b/base_layer/service_framework/src/context/handles.rs index 8b29b98131..2042c8f4e8 100644 --- a/base_layer/service_framework/src/context/handles.rs +++ b/base_layer/service_framework/src/context/handles.rs @@ -197,7 +197,7 @@ impl ServiceHandles { acquire_lock!(self.handles) .get(&type_id) .and_then(|b| b.downcast_ref::()) - .map(Clone::clone) + .cloned() } /// Returns the shutdown signal for this stack diff --git a/base_layer/wallet/src/config.rs b/base_layer/wallet/src/config.rs index 2d8a7ef0eb..3d804cb5f5 100644 --- a/base_layer/wallet/src/config.rs +++ b/base_layer/wallet/src/config.rs @@ -120,6 +120,11 @@ pub struct WalletConfig { pub identity_file: Option, /// The type of wallet software, or specific type of hardware pub wallet_type: Option, + /// The cool down period between balance enquiry checks in seconds; requests faster than this will be ignored. 
+ /// For specialized wallets processing many batch transactions this setting could be increased to 60 s to retain + /// responsiveness of the wallet with slightly delayed balance updates + #[serde(with = "serializers::seconds")] + pub balance_enquiry_cooldown_period: Duration, } impl Default for WalletConfig { @@ -159,6 +164,7 @@ impl Default for WalletConfig { use_libtor: true, identity_file: None, wallet_type: None, + balance_enquiry_cooldown_period: Duration::from_secs(5), } } } diff --git a/base_layer/wallet/src/operation_id.rs b/base_layer/wallet/src/operation_id.rs index 7b91bd8a23..d1d565a746 100644 --- a/base_layer/wallet/src/operation_id.rs +++ b/base_layer/wallet/src/operation_id.rs @@ -50,7 +50,7 @@ impl Hash for OperationId { impl PartialEq for OperationId { fn eq(&self, other: &Self) -> bool { - self.0.eq(&other.0) + self.0 == other.0 } } diff --git a/base_layer/wallet/src/output_manager_service/error.rs b/base_layer/wallet/src/output_manager_service/error.rs index f7f7c3ea64..c7ae684627 100644 --- a/base_layer/wallet/src/output_manager_service/error.rs +++ b/base_layer/wallet/src/output_manager_service/error.rs @@ -148,6 +148,8 @@ pub enum OutputManagerError { ValidationInProgress, #[error("Invalid data: `{0}`")] RangeProofError(String), + #[error("Transaction is over sized: `{0}`")] + TooManyInputsToFulfillTransaction(String), } impl From for OutputManagerError { diff --git a/base_layer/wallet/src/output_manager_service/mod.rs b/base_layer/wallet/src/output_manager_service/mod.rs index b73a23f515..ef26e0d5a2 100644 --- a/base_layer/wallet/src/output_manager_service/mod.rs +++ b/base_layer/wallet/src/output_manager_service/mod.rs @@ -62,6 +62,9 @@ use crate::{ util::wallet_identity::WalletIdentity, }; +/// The maximum number of transaction inputs that can be created in a single transaction, slightly less than the maximum +/// that a single comms message can hold. 
+pub const TRANSACTION_INPUTS_LIMIT: u32 = 4000; const LOG_TARGET: &str = "wallet::output_manager_service::initializer"; pub struct OutputManagerServiceInitializer diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index e812d00d49..674f4e3742 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -63,7 +63,7 @@ use tari_script::{inputs, script, ExecutionStack, Opcode, TariScript}; use tari_service_framework::reply_channel; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray}; -use tokio::sync::Mutex; +use tokio::{sync::Mutex, time::Instant}; use crate::{ base_node_service::handle::{BaseNodeEvent, BaseNodeServiceHandle}, @@ -88,6 +88,7 @@ use crate::{ OutputStatus, }, tasks::TxoValidationTask, + TRANSACTION_INPUTS_LIMIT, }, util::wallet_identity::WalletIdentity, }; @@ -1257,6 +1258,7 @@ where num_outputs: usize, total_output_features_and_scripts_byte_size: usize, ) -> Result { + let start = Instant::now(); debug!( target: LOG_TARGET, "select_utxos amount: {}, fee_per_gram: {}, num_outputs: {}, output_features_and_scripts_byte_size: {}, \ @@ -1284,10 +1286,20 @@ where "select_utxos selection criteria: {}", selection_criteria ); let tip_height = chain_metadata.as_ref().map(|m| m.best_block_height()); + let start_new = Instant::now(); let uo = self .resources .db .fetch_unspent_outputs_for_spending(&selection_criteria, amount, tip_height)?; + let uo_len = uo.len(); + trace!( + target: LOG_TARGET, + "select_utxos profile - fetch_unspent_outputs_for_spending: {} outputs, {} ms (at {})", + uo_len, + start_new.elapsed().as_millis(), + start.elapsed().as_millis(), + ); + let start_new = Instant::now(); // For non-standard queries, we want to ensure that the intended UTXOs are selected if !selection_criteria.filter.is_standard() && uo.is_empty() { @@ -1310,7 +1322,7 @@ where .map_err(|e| 
OutputManagerError::ConversionError(e.to_string()))?, ); - trace!(target: LOG_TARGET, "We found {} UTXOs to select from", uo.len()); + trace!(target: LOG_TARGET, "We found {} UTXOs to select from", uo_len); let mut requires_change_output = false; let mut utxos_total_value = MicroMinotari::from(0); @@ -1349,8 +1361,22 @@ where let perfect_utxo_selection = utxos_total_value == amount + fee_without_change; let enough_spendable = utxos_total_value > amount + fee_with_change; + trace!( + target: LOG_TARGET, + "select_utxos profile - final_selection: {} outputs from {}, {} ms (at {})", + utxos.len(), + uo_len, + start_new.elapsed().as_millis(), + start.elapsed().as_millis(), + ); if !perfect_utxo_selection && !enough_spendable { + if uo_len == TRANSACTION_INPUTS_LIMIT as usize { + return Err(OutputManagerError::TooManyInputsToFulfillTransaction(format!( + "Input limit '{}' reached", + TRANSACTION_INPUTS_LIMIT + ))); + } let current_tip_for_time_lock_calculation = chain_metadata.map(|cm| cm.best_block_height()); let balance = self.get_balance(current_tip_for_time_lock_calculation)?; let pending_incoming = balance.pending_incoming_balance; diff --git a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs index a76f580848..2f1a72cffc 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs @@ -46,7 +46,7 @@ pub trait OutputManagerBackend: Send + Sync + Clone { /// Perform a batch update of the outputs' unmined and invalid state fn set_outputs_to_unmined_and_invalid(&self, hashes: Vec) -> Result<(), OutputManagerStorageError>; /// Perform a batch update of the outputs' last validation timestamp - fn update_last_validation_timestamps(&self, hashes: Vec) -> Result<(), OutputManagerStorageError>; + fn update_last_validation_timestamps(&self, commitments: Vec) -> Result<(), 
OutputManagerStorageError>; fn set_outputs_to_be_revalidated(&self) -> Result<(), OutputManagerStorageError>; /// Perform a batch update of the outputs' spent status fn mark_outputs_as_spent(&self, updates: Vec) -> Result<(), OutputManagerStorageError>; diff --git a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs index cac7fba87c..503f607b84 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs @@ -402,9 +402,12 @@ where T: OutputManagerBackend + 'static Ok(()) } - pub fn update_last_validation_timestamps(&self, hashes: Vec) -> Result<(), OutputManagerStorageError> { + pub fn update_last_validation_timestamps( + &self, + commitments: Vec, + ) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.update_last_validation_timestamps(hashes)?; + db.update_last_validation_timestamps(commitments)?; Ok(()) } diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index dec27f0901..8e0018044d 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -425,12 +425,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - debug!( - target: LOG_TARGET, - "`set_received_outputs_mined_height_and_statuses` for {} outputs", - updates.len() - ); - let commitments: Vec = updates.iter().map(|update| update.commitment.clone()).collect(); if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? 
{ return Err(OutputManagerStorageError::ValuesNotFound); @@ -569,30 +563,58 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(()) } - fn update_last_validation_timestamps(&self, hashes: Vec) -> Result<(), OutputManagerStorageError> { + fn update_last_validation_timestamps(&self, commitments: Vec) -> Result<(), OutputManagerStorageError> { let start = Instant::now(); let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - // Using a raw query here, as the obvious diesel query is not as performant as expected: - // `diesel::update(outputs::table.filter(outputs::hash.eq_any(hashes.iter().map(|hash| hash.to_vec())))) - // ` .set(outputs::last_validation_timestamp.eq(Some(Utc::now().naive_utc()))) - // ` .execute(&mut conn) - // ` .num_rows_affected_or_not_found(hashes.len())?; - let sql_query = format!( - r#" - UPDATE outputs - SET last_validation_timestamp = '{}' - WHERE hash IN ({}) - "#, - Utc::now().naive_utc(), - hashes + if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? { + return Err(OutputManagerStorageError::ValuesNotFound); + } + + let last_validation_timestamp = Utc::now().naive_utc(); + + // Three queries were evaluated to determine the most efficient way to update the last validation timestamp + // during system-level stress testing: + // - Using `diesel`: + // - `diesel::update(outputs::table.filter(outputs::hash.eq_any(hashes)).set(...).execute(&mut conn)` + // - Note: `diesel` does not support batch updates, so we have to do it manually. + // - Using a raw query that mimicked the `diesel` query: + // - `UPDATE outputs SET last_validation_timestamp = '{}' WHERE hash IN ({})` + // - 20% faster than `diesel` on average + // - Using a raw query with a batch insert (as implemented below): + // - `INSERT INTO outputs (..) VALUES (...) 
ON CONFLICT (commitment) DO UPDATE SET ...` + // - 1011% faster than `diesel` on average + + let mut query = String::from( + "INSERT INTO outputs ( commitment, last_validation_timestamp, mined_height, mined_in_block, status, \ + mined_timestamp, spending_key, value, output_type, maturity, hash, script, input_data, \ + script_private_key, sender_offset_public_key, metadata_signature_ephemeral_commitment, \ + metadata_signature_ephemeral_pubkey, metadata_signature_u_a, metadata_signature_u_x, \ + metadata_signature_u_y, spending_priority, covenant, encrypted_data, minimum_value_promise + ) + VALUES ", + ); + + query.push_str( + &commitments .iter() - .map(|hash| format!("'{}'", hash)) - .collect::>() - .join(",") + .map(|commitment| { + format!( + "(x'{}', '{}', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + commitment.to_hex(), + last_validation_timestamp, + ) + }) + .collect::>() + .join(", "), + ); + + query.push_str( + " ON CONFLICT (commitment) DO UPDATE SET last_validation_timestamp = excluded.last_validation_timestamp", ); - conn.batch_execute(&sql_query)?; + + conn.batch_execute(&query)?; if start.elapsed().as_millis() > 0 { trace!( @@ -601,7 +623,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { acquire_lock.as_millis(), (start.elapsed() - acquire_lock).as_millis(), start.elapsed().as_millis(), - hashes.len() + commitments.len(), ); } @@ -614,12 +636,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { let mut conn = self.database_connection.get_pooled_connection()?; let acquire_lock = start.elapsed(); - debug!( - target: LOG_TARGET, - "`mark_outputs_as_spent` for {} outputs", - updates.len() - ); - let commitments: Vec = updates.iter().map(|update| update.commitment.clone()).collect(); if !OutputSql::verify_outputs_exist(&commitments, &mut conn)? 
{ return Err(OutputManagerStorageError::ValuesNotFound); diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs index 5f2431c135..83d7d2478c 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs @@ -58,6 +58,7 @@ use crate::{ }, UtxoSelectionFilter, UtxoSelectionOrdering, + TRANSACTION_INPUTS_LIMIT, }, schema::outputs, }; @@ -264,7 +265,7 @@ impl OutputSql { }, }; - Ok(query.load(conn)?) + Ok(query.limit(i64::from(TRANSACTION_INPUTS_LIMIT)).load(conn)?) } /// Return all unspent outputs that have a maturity above the provided chain tip diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index c840894eb1..45e68e3103 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -186,10 +186,10 @@ where .for_protocol(self.operation_id)?; } - let unmined_hashes: Vec<_> = unmined.iter().map(|o| o.hash).collect(); - if !unmined_hashes.is_empty() { + let unmined_info: Vec<_> = unmined.iter().map(|o| o.commitment.clone()).collect(); + if !unmined_info.is_empty() { self.db - .update_last_validation_timestamps(unmined_hashes) + .update_last_validation_timestamps(unmined_info) .for_protocol(self.operation_id)?; } } diff --git a/base_layer/wallet/src/storage/sqlite_db/wallet.rs b/base_layer/wallet/src/storage/sqlite_db/wallet.rs index 5be73d2888..74b073b711 100644 --- a/base_layer/wallet/src/storage/sqlite_db/wallet.rs +++ b/base_layer/wallet/src/storage/sqlite_db/wallet.rs @@ -954,9 +954,15 @@ impl ClientKeyValueSql { impl Encryptable for ClientKeyValueSql { fn domain(&self, field_name: &'static str) -> Vec { - 
[Self::CLIENT_KEY_VALUE, self.key.as_bytes(), field_name.as_bytes()] - .concat() - .to_vec() + // Because there are two variable-length inputs in the concatenation, we prepend the length of the first + [ + Self::CLIENT_KEY_VALUE, + (self.key.len() as u64).to_le_bytes().as_bytes(), + self.key.as_bytes(), + field_name.as_bytes(), + ] + .concat() + .to_vec() } #[allow(unused_assignments)] diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index 9728957771..33accdfb68 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -774,12 +774,11 @@ pub fn read_or_create_wallet_type( (None, None) => { panic!("Something is very wrong, no wallet type was found in the DB, or provided (on first run)") }, - (Some(_), Some(_)) => panic!("Something is very wrong we have a wallet type from the DB and on first run"), (None, Some(t)) => { db.set_wallet_type(t)?; Ok(t) }, - (Some(t), None) => Ok(t), + (Some(t), _) => Ok(t), } } diff --git a/base_layer/wallet/tests/transaction_service_tests/storage.rs b/base_layer/wallet/tests/transaction_service_tests/storage.rs index 2fb7043b94..bf32ac5ca4 100644 --- a/base_layer/wallet/tests/transaction_service_tests/storage.rs +++ b/base_layer/wallet/tests/transaction_service_tests/storage.rs @@ -115,7 +115,7 @@ pub async fn test_db_backend(backend: T) { let stp = builder.build().await.unwrap(); - let messages = vec!["Hey!".to_string(), "Yo!".to_string(), "Sup!".to_string()]; + let messages = ["Hey!".to_string(), "Yo!".to_string(), "Sup!".to_string()]; let amounts = [ MicroMinotari::from(10_000), MicroMinotari::from(23_000), diff --git a/base_layer/wallet_ffi/README.md b/base_layer/wallet_ffi/README.md index 092a3d15f8..f00bdf193a 100644 --- a/base_layer/wallet_ffi/README.md +++ b/base_layer/wallet_ffi/README.md @@ -132,8 +132,8 @@ Install [Rust](https://www.rust-lang.org/tools/install) Install the following tools and system images ```Shell Script -rustup toolchain add nightly-2023-12-12 
-rustup default nightly-2023-12-12 +rustup toolchain add nightly-2024-02-04 +rustup default nightly-2024-02-04 rustup component add rustfmt --toolchain nightly rustup component add clippy rustup target add x86_64-apple-ios aarch64-apple-ios # iPhone and emulator cross compiling diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 8fb30829e3..ef7cdff9de 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -4924,6 +4924,7 @@ pub unsafe extern "C" fn comms_list_connected_public_keys( let mut connectivity = (*wallet).wallet.comms.connectivity(); let peer_manager = (*wallet).wallet.comms.peer_manager(); + #[allow(clippy::blocks_in_conditions)] match (*wallet).runtime.block_on(async move { let connections = connectivity.get_active_connections().await?; let mut public_keys = Vec::with_capacity(connections.len()); @@ -6412,6 +6413,7 @@ pub unsafe extern "C" fn wallet_get_seed_peers(wallet: *mut TariWallet, error_ou } let peer_manager = (*wallet).wallet.comms.peer_manager(); let query = PeerQuery::new().select_where(|p| p.is_seed()); + #[allow(clippy::blocks_in_conditions)] match (*wallet).runtime.block_on(async move { let peers = peer_manager.perform_query(query).await?; let mut public_keys = Vec::with_capacity(peers.len()); diff --git a/buildtools/docker/base_node.Dockerfile b/buildtools/docker/base_node.Dockerfile index aa02113de8..e5aae31e56 100644 --- a/buildtools/docker/base_node.Dockerfile +++ b/buildtools/docker/base_node.Dockerfile @@ -1,13 +1,13 @@ # syntax=docker/dockerfile:1 #FROM rust:1.42.0 as builder -FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2023-12-12 as builder +FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2024-02-04 as builder # Copy the dependency lists #ADD Cargo.toml ./ ADD . 
/minotari_node WORKDIR /minotari_node -# RUN rustup component add rustfmt --toolchain nightly-2023-12-12-x86_64-unknown-linux-gnu +# RUN rustup component add rustfmt --toolchain nightly-2024-02-04-x86_64-unknown-linux-gnu #ARG TBN_ARCH=native ARG TBN_ARCH=x86-64 #ARG TBN_FEATURES=avx2 diff --git a/common/Cargo.toml b/common/Cargo.toml index c7f357ae1d..20ddb72700 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -19,7 +19,7 @@ tari_features = { path = "./tari_features", version = "1.0.0-dan.5" } anyhow = "1.0.53" blake2 = "0.10" -config = { version = "0.13.0", default_features = false, features = ["toml"] } +config = { version = "0.14.0", default_features = false, features = ["toml"] } dirs-next = "1.0.2" git2 = { version = "0.18", default_features = false, optional = true } log = "0.4.8" diff --git a/common/config/presets/c_base_node_b_mining_allow_methods.toml b/common/config/presets/c_base_node_b_mining_allow_methods.toml index 8a2d6113dc..085017b383 100644 --- a/common/config/presets/c_base_node_b_mining_allow_methods.toml +++ b/common/config/presets/c_base_node_b_mining_allow_methods.toml @@ -33,6 +33,8 @@ grpc_server_allow_methods = [ "get_network_difficulty", "get_new_block_template", "get_new_block", + "get_new_block_with_coinbases", + "get_new_block_template_with_coinbases", "get_new_block_blob", "submit_block", "submit_block_blob", diff --git a/common/config/presets/c_base_node_b_non_mining_allow_methods.toml b/common/config/presets/c_base_node_b_non_mining_allow_methods.toml index 92ebf7cf34..bf27e6186c 100644 --- a/common/config/presets/c_base_node_b_non_mining_allow_methods.toml +++ b/common/config/presets/c_base_node_b_non_mining_allow_methods.toml @@ -32,6 +32,8 @@ grpc_server_allow_methods = [ #"get_tokens_in_circulation", #"get_network_difficulty", #"get_new_block_template", + #"get_new_block_with_coinbases", + #"get_new_block_template_with_coinbases", #"get_new_block", #"get_new_block_blob", #"submit_block", diff --git 
a/common/config/presets/d_console_wallet.toml b/common/config/presets/d_console_wallet.toml index 176cca2c85..d54d355e8d 100644 --- a/common/config/presets/d_console_wallet.toml +++ b/common/config/presets/d_console_wallet.toml @@ -95,6 +95,11 @@ # An example script is available here: applications/minotari_console_wallet/src/notifier/notify_example.sh #notify_file = "/path/to/script" +# The cool down period between balance enquiry checks in seconds; requests faster than this will be ignored. +# For specialized wallets processing many batch transactions this setting could be increased to 60 s to retain +# responsiveness of the wallet with slightly delayed balance updates (default = 5): +#balance_enquiry_cooldown_period = 5 + [wallet.transactions] # This is the timeout period that will be used for base node broadcast monitoring tasks (default = 30) broadcast_monitoring_timeout = 180 diff --git a/common/config/presets/f_merge_mining_proxy.toml b/common/config/presets/f_merge_mining_proxy.toml index 0083fd4921..fdaccccf53 100644 --- a/common/config/presets/f_merge_mining_proxy.toml +++ b/common/config/presets/f_merge_mining_proxy.toml @@ -7,24 +7,37 @@ [merge_mining_proxy] -# URL to monerod (default = "") + +# Use dynamic monerod URL obtained form the official Monero website (https://monero.fail/) (default: true) +#use_dynamic_fail_data = true + +# The monero fail URL to get the monerod URLs from - must be pointing to the official Monero website. 
+# Valid alternatives are: +# - mainnet: 'https://monero.fail/?chain=monero&network=mainnet&all=true' (default) +# - stagenet: `https://monero.fail/?chain=monero&network=stagenet&all=true` +# - testnet: `https://monero.fail/?chain=monero&network=testnet&all=true` +#monero_fail_url = "https://monero.fail/?chain=monero&network=mainnet&all=true" + +# URL to monerod (you can add your own server here or use public nodes from https://monero.fail/), only if +# 'use_dynamic_fail_data = false' (default = "") + #monerod_url = [# stagenet # "http://stagenet.xmr-tw.org:38081", -# "http://stagenet.community.xmr.to:38081", -# "http://monero-stagenet.exan.tech:38081", +# "http://node.monerodevs.org:38089", +# "http://node3.monerodevs.org:38089", # "http://xmr-lux.boldsuck.org:38081", # "http://singapore.node.xmr.pm:38081", #] monerod_url = [ # mainnet - # more reliable - "http://xmr.support:18081", "http://node1.xmr-tw.org:18081", + "https://monero.homeqloud.com:443", + "http://monero1.com:18089", + "http://node.c3pool.org:18081", + "http://xmr-full.p2pool.uk:18089", + "https://monero.stackwallet.com:18081", + "http://xmr.support:18081", "http://xmr.nthrow.nyc:18081", - # not so reliable - "http://node.xmrig.com:18081", - "http://monero.exan.tech:18081", - "http://18.132.124.81:18081", ] # Username for curl. (default = "") diff --git a/common/config/presets/g_miner.toml b/common/config/presets/g_miner.toml index 35b58500f9..45e473b9c9 100644 --- a/common/config/presets/g_miner.toml +++ b/common/config/presets/g_miner.toml @@ -42,11 +42,6 @@ # Base node reconnect timeout after any GRPC or miner error (default: 10 s) #wait_timeout_on_error = 10 -# The extra data to store in the coinbase, usually some data about the mining pool. -# Note that this data is publicly readable, but it is suggested you populate it so that -# pool dominance can be seen before any one party has more than 51%. 
(default = "minotari_miner") -#coinbase_extra = "minotari_miner" - # The Tari wallet address (valid address in hex) where the mining funds will be sent to - must be assigned # e.g. "78e724f466d202abdee0f23c261289074e4a2fc9eb61e83e0179eead76ce2d3f17" #wallet_payment_address = "YOUR_WALLET_TARI_ADDRESS" diff --git a/comms/core/src/peer_manager/migrations.rs b/comms/core/src/peer_manager/migrations.rs index 8672bfe967..e933c3bc54 100644 --- a/comms/core/src/peer_manager/migrations.rs +++ b/comms/core/src/peer_manager/migrations.rs @@ -31,7 +31,7 @@ pub(super) const MIGRATION_VERSION_KEY: u64 = u64::MAX; pub fn migrate(database: &LMDBDatabase) -> Result<(), LMDBError> { // Add migrations here in version order - let migrations = vec![v7::Migration.boxed()]; + let migrations = [v7::Migration.boxed()]; if migrations.is_empty() { return Ok(()); } diff --git a/comms/core/src/test_utils/mocks/connection_manager.rs b/comms/core/src/test_utils/mocks/connection_manager.rs index 66b8cc41e3..a84a2a65f6 100644 --- a/comms/core/src/test_utils/mocks/connection_manager.rs +++ b/comms/core/src/test_utils/mocks/connection_manager.rs @@ -139,7 +139,7 @@ impl ConnectionManagerMock { .lock() .await .get(&node_id) - .map(Clone::clone) + .cloned() .ok_or(ConnectionManagerError::DialConnectFailedAllAddresses); let _result = reply_tx.take().map(|tx| tx.send(result)); }, diff --git a/comms/core/src/tor/hidden_service/controller.rs b/comms/core/src/tor/hidden_service/controller.rs index 1171f14b02..66a26fb4e6 100644 --- a/comms/core/src/tor/hidden_service/controller.rs +++ b/comms/core/src/tor/hidden_service/controller.rs @@ -365,7 +365,7 @@ impl HiddenServiceController { }, }; - let identity = self.identity.as_ref().map(Clone::clone).expect("already checked"); + let identity = self.identity.clone().expect("already checked"); debug!( target: LOG_TARGET, "Added hidden service with service id '{}' on port '{}'", identity.service_id, identity.onion_port diff --git 
a/comms/dht/src/test_utils/dht_actor_mock.rs b/comms/dht/src/test_utils/dht_actor_mock.rs index b8714ffa36..7ccbd2fd09 100644 --- a/comms/dht/src/test_utils/dht_actor_mock.rs +++ b/comms/dht/src/test_utils/dht_actor_mock.rs @@ -72,7 +72,7 @@ impl DhtMockState { } pub fn get_setting(&self, key: DhtMetadataKey) -> Option> { - self.settings.read().unwrap().get(&key.to_string()).map(Clone::clone) + self.settings.read().unwrap().get(&key.to_string()).cloned() } } @@ -124,13 +124,7 @@ impl DhtActorMock { .unwrap(); }, GetMetadata(key, reply_tx) => { - let _result = reply_tx.send(Ok(self - .state - .settings - .read() - .unwrap() - .get(&key.to_string()) - .map(Clone::clone))); + let _result = reply_tx.send(Ok(self.state.settings.read().unwrap().get(&key.to_string()).cloned())); }, SetMetadata(key, value, reply_tx) => { self.state.settings.write().unwrap().insert(key.to_string(), value); diff --git a/infrastructure/storage/src/lmdb_store/mod.rs b/infrastructure/storage/src/lmdb_store/mod.rs index 2833649193..2e2cf94c86 100644 --- a/infrastructure/storage/src/lmdb_store/mod.rs +++ b/infrastructure/storage/src/lmdb_store/mod.rs @@ -28,4 +28,4 @@ pub use lmdb_zero::{ db, traits::{AsLmdbBytes, FromLmdbBytes}, }; -pub use store::{DatabaseRef, LMDBBuilder, LMDBConfig, LMDBDatabase, LMDBStore}; +pub use store::{DatabaseRef, LMDBBuilder, LMDBConfig, LMDBDatabase, LMDBStore, BYTES_PER_MB}; diff --git a/infrastructure/storage/src/lmdb_store/store.rs b/infrastructure/storage/src/lmdb_store/store.rs index 0756a9cc52..bf2cc33fc3 100644 --- a/infrastructure/storage/src/lmdb_store/store.rs +++ b/infrastructure/storage/src/lmdb_store/store.rs @@ -9,6 +9,7 @@ use std::{ convert::TryInto, path::{Path, PathBuf}, sync::Arc, + time::Instant, }; use lmdb_zero::{ @@ -41,7 +42,7 @@ use crate::{ }; const LOG_TARGET: &str = "lmdb"; -const BYTES_PER_MB: usize = 1024 * 1024; +pub const BYTES_PER_MB: usize = 1024 * 1024; /// An atomic pointer to an LMDB database instance pub type DatabaseRef = Arc>; 
@@ -92,7 +93,8 @@ impl LMDBConfig { impl Default for LMDBConfig { fn default() -> Self { - Self::new_from_mb(16, 16, 4) + // Do not choose these values too small, as the entire SMT is replaced for every new block + Self::new_from_mb(128, 128, 64) } } @@ -186,7 +188,7 @@ impl LMDBBuilder { let flags = self.env_flags | open::NOTLS; let env = builder.open(&path, flags, 0o600)?; // SAFETY: no transactions can be open at this point - LMDBStore::resize_if_required(&env, &self.env_config)?; + LMDBStore::resize_if_required(&env, &self.env_config, None)?; Arc::new(env) }; @@ -346,16 +348,15 @@ pub struct LMDBStore { } impl LMDBStore { - /// Close all databases and close the environment. You cannot be guaranteed that the dbs will be closed after - /// calling this function because there still may be threads accessing / writing to a database that will block - /// this call. However, in that case `shutdown` returns an error. + /// Force flush the data buffers to disk. pub fn flush(&self) -> Result<(), lmdb_zero::error::Error> { - trace!(target: LOG_TARGET, "Forcing flush of buffers to disk"); + let start = Instant::now(); self.env.sync(true)?; - debug!(target: LOG_TARGET, "LMDB Buffers have been flushed"); + trace!(target: LOG_TARGET, "LMDB buffers flushed in {:.2?}", start.elapsed()); Ok(()) } + /// Write log information about the LMDB environment and databases to the log. pub fn log_info(&self) { match self.env.info() { Err(e) => warn!( @@ -406,10 +407,12 @@ impl LMDBStore { self.databases.get(db_name).cloned() } + /// Returns the LMDB environment configuration pub fn env_config(&self) -> LMDBConfig { self.env_config.clone() } + /// Returns the LMDB environment with handle pub fn env(&self) -> Arc { self.env.clone() } @@ -421,30 +424,39 @@ impl LMDBStore { /// not check for this condition, the caller must ensure it explicitly. 
/// /// - pub unsafe fn resize_if_required(env: &Environment, config: &LMDBConfig) -> Result<(), LMDBError> { - let env_info = env.info()?; - let stat = env.stat()?; - let size_used_bytes = stat.psize as usize * env_info.last_pgno; - let size_left_bytes = env_info.mapsize - size_used_bytes; - debug!( - target: LOG_TARGET, - "Resize check: Used bytes: {}, Remaining bytes: {}", size_used_bytes, size_left_bytes - ); - - if size_left_bytes <= config.resize_threshold_bytes { - Self::resize(env, config)?; + pub unsafe fn resize_if_required( + env: &Environment, + config: &LMDBConfig, + increase_threshold_by: Option, + ) -> Result<(), LMDBError> { + let (mapsize, size_used_bytes, size_left_bytes) = LMDBStore::get_stats(env)?; + if size_left_bytes <= config.resize_threshold_bytes + increase_threshold_by.unwrap_or_default() { debug!( target: LOG_TARGET, - "({}) LMDB size used {:?} MB, environment space left {:?} MB, increased by {:?} MB", - env.path()?.to_str()?, + "Resize required: mapsize: {} MB, used: {} MB, remaining: {} MB", + mapsize / BYTES_PER_MB, size_used_bytes / BYTES_PER_MB, - size_left_bytes / BYTES_PER_MB, - config.grow_size_bytes / BYTES_PER_MB, + size_left_bytes / BYTES_PER_MB ); + Self::resize(env, config, Some(increase_threshold_by.unwrap_or_default()))?; } Ok(()) } + /// Returns the LMDB environment statistics. + /// Note: + /// In Windows and Ubuntu, this function does not always return the actual used size of the + /// database on disk when the database has grown large (> 700MB), reason unknown (not tested + /// on Mac). 
+ pub fn get_stats(env: &Environment) -> Result<(usize, usize, usize), LMDBError> { + let env_info = env.info()?; + let stat = env.stat()?; + let size_used_bytes = stat.psize as usize * env_info.last_pgno; + let size_left_bytes = env_info.mapsize - size_used_bytes; + + Ok((env_info.mapsize, size_used_bytes, size_left_bytes)) + } + /// Grows the LMDB environment by the configured amount /// /// # Safety @@ -452,19 +464,25 @@ impl LMDBStore { /// not check for this condition, the caller must ensure it explicitly. /// /// - pub unsafe fn resize(env: &Environment, config: &LMDBConfig) -> Result<(), LMDBError> { + pub unsafe fn resize( + env: &Environment, + config: &LMDBConfig, + increase_threshold_by: Option, + ) -> Result<(), LMDBError> { + let start = Instant::now(); let env_info = env.info()?; let current_mapsize = env_info.mapsize; - env.set_mapsize(current_mapsize + config.grow_size_bytes)?; + env.set_mapsize(current_mapsize + config.grow_size_bytes + increase_threshold_by.unwrap_or_default())?; let env_info = env.info()?; let new_mapsize = env_info.mapsize; debug!( target: LOG_TARGET, - "({}) LMDB MB, mapsize was grown from {:?} MB to {:?} MB, increased by {:?} MB", + "({}) LMDB MB, mapsize was grown from {} MB to {} MB, increased by {} MB, in {:.2?}", env.path()?.to_str()?, current_mapsize / BYTES_PER_MB, new_mapsize / BYTES_PER_MB, - config.grow_size_bytes / BYTES_PER_MB, + (config.grow_size_bytes + increase_threshold_by.unwrap_or_default()) / BYTES_PER_MB, + start.elapsed() ); Ok(()) @@ -487,18 +505,20 @@ impl LMDBDatabase { K: AsLmdbBytes + ?Sized, V: Serialize, { - const MAX_RESIZES: usize = 5; + // Resize this many times before assuming something is not right (up to 1 GB) + let max_resizes = 1024 * BYTES_PER_MB / self.env_config.grow_size_bytes(); let value = LMDBWriteTransaction::convert_value(value)?; - for _ in 0..MAX_RESIZES { + for i in 0..max_resizes { match self.write(key, &value) { Ok(txn) => return Ok(txn), Err(error::Error::Code(error::MAP_FULL)) 
=> { info!( target: LOG_TARGET, - "Failed to obtain write transaction because the database needs to be resized" + "Database resize required (resized {} time(s) in this transaction)", + i + 1 ); unsafe { - LMDBStore::resize(&self.env, &self.env_config)?; + LMDBStore::resize(&self.env, &self.env_config, Some(value.len()))?; } }, Err(e) => return Err(e.into()), diff --git a/infrastructure/storage/tests/lmdb.rs b/infrastructure/storage/tests/lmdb.rs index 45740521c7..ce14dd3386 100644 --- a/infrastructure/storage/tests/lmdb.rs +++ b/infrastructure/storage/tests/lmdb.rs @@ -222,9 +222,9 @@ fn test_multi_thread_writes() { #[test] fn test_multi_writes() { { - let env = init("multi-writes").unwrap(); + let store = init("multi-writes").unwrap(); for i in 0..2 { - let db = env.get_handle("users").unwrap(); + let db = store.get_handle("users").unwrap(); let res = db.with_write_transaction(|mut txn| { for j in 0..1000 { let v = i * 1000 + j; @@ -235,7 +235,7 @@ fn test_multi_writes() { }); assert!(res.is_ok()); } - env.flush().unwrap(); + store.flush().unwrap(); } clean_up("multi-writes"); // In Windows file handles must be released before files can be deleted } @@ -277,7 +277,7 @@ fn test_lmdb_resize_on_create() { let db_name = "test"; { // Create db with large preset environment size - let env = LMDBBuilder::new() + let store = LMDBBuilder::new() .set_path(&path) .set_env_config(LMDBConfig::new( 100 * PRESET_SIZE * 1024 * 1024, @@ -289,17 +289,17 @@ fn test_lmdb_resize_on_create() { .build() .unwrap(); // Add some data that is `>= 2 * (PRESET_SIZE * 1024 * 1024)` - let db = env.get_handle(db_name).unwrap(); + let db = store.get_handle(db_name).unwrap(); let users = load_users(); for i in 0..100 { db.insert(&i, &users).unwrap(); } // Ensure enough data is loaded - let env_info = env.env().info().unwrap(); - let env_stat = env.env().stat().unwrap(); + let env_info = store.env().info().unwrap(); + let env_stat = store.env().stat().unwrap(); size_used_round_1 = env_stat.psize as 
usize * env_info.last_pgno; assert!(size_used_round_1 >= 2 * (PRESET_SIZE * 1024 * 1024)); - env.flush().unwrap(); + store.flush().unwrap(); } { diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index d8a737b350..95022046d5 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -35,7 +35,7 @@ tari_key_manager = { path = "../base_layer/key_manager" } anyhow = "1.0.53" async-trait = "0.1.50" chrono = { version = "0.4.22", default-features = false } -config = "0.13.0" +config = "0.14.0" csv = "1.1" cucumber = { version = "0.20.0", features = ["default", "libtest", "output-junit"] } futures = { version = "^0.3.1" } diff --git a/integration_tests/src/base_node_process.rs b/integration_tests/src/base_node_process.rs index d24e16fb29..675bee11ff 100644 --- a/integration_tests/src/base_node_process.rs +++ b/integration_tests/src/base_node_process.rs @@ -204,6 +204,8 @@ pub async fn spawn_base_node_with_config( GrpcMethod::GetNetworkDifficulty, GrpcMethod::GetNewBlockTemplate, GrpcMethod::GetNewBlock, + GrpcMethod::GetNewBlockWithCoinbases, + GrpcMethod::GetNewBlockTemplateWithCoinbases, GrpcMethod::GetNewBlockBlob, GrpcMethod::SubmitBlock, GrpcMethod::SubmitBlockBlob, diff --git a/integration_tests/src/chat_ffi.rs b/integration_tests/src/chat_ffi.rs index 6ecd2d0be7..3b02918d38 100644 --- a/integration_tests/src/chat_ffi.rs +++ b/integration_tests/src/chat_ffi.rs @@ -41,10 +41,7 @@ use tari_comms::{ multiaddr::Multiaddr, peer_manager::{Peer, PeerFeatures}, }; -use tari_contacts::contacts_service::{ - service::ContactOnlineStatus, - types::{Message, MessageMetadataType}, -}; +use tari_contacts::contacts_service::{service::ContactOnlineStatus, types::Message}; use crate::{chat_client::test_config, get_port}; @@ -92,7 +89,7 @@ extern "C" { pub fn send_chat_message(client: *mut ClientFFI, message: *mut c_void, error_out: *const c_int); pub fn add_chat_message_metadata( message: *mut c_void, - metadata_type: c_int, + 
metadata_type: *const c_char, data: *const c_char, error_out: *const c_int, ) -> *mut c_void; @@ -199,20 +196,22 @@ impl ChatClient for ChatFFI { } } - fn add_metadata(&self, message: Message, metadata_type: MessageMetadataType, data: String) -> Message { + fn add_metadata(&self, message: Message, key: String, data: String) -> Message { let message_ptr = Box::into_raw(Box::new(message)) as *mut c_void; - let message_type = metadata_type.as_byte(); - let error_out = Box::into_raw(Box::new(0)); - let bytes = data.into_bytes(); - let len = i32::try_from(bytes.len()).expect("Truncation occurred") as c_uint; - let byte_data = unsafe { chat_byte_vector_create(bytes.as_ptr(), len, error_out) }; + let key_bytes = key.into_bytes(); + let len = i32::try_from(key_bytes.len()).expect("Truncation occurred") as c_uint; + let byte_key = unsafe { chat_byte_vector_create(key_bytes.as_ptr(), len, error_out) }; + + let data_bytes = data.into_bytes(); + let len = i32::try_from(data_bytes.len()).expect("Truncation occurred") as c_uint; + let byte_data = unsafe { chat_byte_vector_create(data_bytes.as_ptr(), len, error_out) }; unsafe { add_chat_message_metadata( message_ptr, - i32::from(message_type), + byte_key as *const c_char, byte_data as *const c_char, error_out, ); diff --git a/integration_tests/src/merge_mining_proxy.rs b/integration_tests/src/merge_mining_proxy.rs index 87e7efa4e8..26663bab8a 100644 --- a/integration_tests/src/merge_mining_proxy.rs +++ b/integration_tests/src/merge_mining_proxy.rs @@ -117,8 +117,8 @@ impl MergeMiningProxyProcess { "merge_mining_proxy.monerod_url".to_string(), [ "http://stagenet.xmr-tw.org:38081", - "http://stagenet.community.xmr.to:38081", - "http://monero-stagenet.exan.tech:38081", + "http://node.monerodevs.org:38089", + "http://node3.monerodevs.org:38089", "http://xmr-lux.boldsuck.org:38081", "http://singapore.node.xmr.pm:38081", ] @@ -140,6 +140,10 @@ impl MergeMiningProxyProcess { wallet_payment_address.to_hex(), ), 
("merge_mining_proxy.stealth_payment".to_string(), stealth.to_string()), + ( + "merge_mining_proxy.use_dynamic_fail_data".to_string(), + "false".to_string(), + ), ], }, non_interactive_mode: false, diff --git a/integration_tests/tests/features/BlockTemplate.feature b/integration_tests/tests/features/BlockTemplate.feature index c7854f2dd1..12b4533c7f 100644 --- a/integration_tests/tests/features/BlockTemplate.feature +++ b/integration_tests/tests/features/BlockTemplate.feature @@ -9,3 +9,10 @@ Scenario: Verify UTXO and kernel MMR size in header Given I have a seed node SEED_A When I have 1 base nodes connected to all seed nodes Then meddling with block template data from node SEED_A is not allowed + + @critical + Scenario: Verify grpc can create block with more than 1 coinbase + Given I have a seed node SEED_A + When I have 1 base nodes connected to all seed nodes + Then generate a block with 2 coinbases from node SEED_A + Then generate a block with 2 coinbases as a single request from node SEED_A \ No newline at end of file diff --git a/integration_tests/tests/steps/chat_steps.rs b/integration_tests/tests/steps/chat_steps.rs index 7b7d6aa525..7fc202faf2 100644 --- a/integration_tests/tests/steps/chat_steps.rs +++ b/integration_tests/tests/steps/chat_steps.rs @@ -26,7 +26,7 @@ use cucumber::{then, when}; use tari_contacts::contacts_service::{ handle::{DEFAULT_MESSAGE_LIMIT, DEFAULT_MESSAGE_PAGE}, service::ContactOnlineStatus, - types::{Direction, Message, MessageMetadata, MessageMetadataType}, + types::{Direction, Message, MessageMetadata}, }; use tari_integration_tests::{chat_client::spawn_chat_client, TariWorld}; @@ -108,7 +108,7 @@ async fn i_reply_to_message( let message = sender.add_metadata( message, - MessageMetadataType::Reply, + "reply".to_string(), String::from_utf8(inbound_chat_message.message_id).expect("bytes to uuid"), ); @@ -221,11 +221,7 @@ async fn have_replied_message( let metadata: &MessageMetadata = &inbound_chat_message.metadata[0]; // Metadata
data is a reply type - assert_eq!( - metadata.metadata_type, - MessageMetadataType::Reply, - "Metadata type is wrong" - ); + assert_eq!(metadata.key, "reply".as_bytes(), "Metadata type is wrong"); // Metadata data contains id to original message assert_eq!( diff --git a/integration_tests/tests/steps/node_steps.rs b/integration_tests/tests/steps/node_steps.rs index 7b3e325359..f6c1049ab6 100644 --- a/integration_tests/tests/steps/node_steps.rs +++ b/integration_tests/tests/steps/node_steps.rs @@ -20,15 +20,29 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, time::Duration}; +use std::{ + convert::{TryFrom, TryInto}, + time::Duration, +}; use cucumber::{given, then, when}; use futures::StreamExt; use indexmap::IndexMap; -use minotari_app_grpc::tari_rpc::{self as grpc, GetBlocksRequest, ListHeadersRequest}; +use minotari_app_grpc::tari_rpc::{ + self as grpc, + pow_algo::PowAlgos, + GetBlocksRequest, + GetNewBlockTemplateWithCoinbasesRequest, + GetNewBlockWithCoinbasesRequest, + ListHeadersRequest, + NewBlockCoinbase, + NewBlockTemplateRequest, + PowAlgo, +}; use minotari_node::BaseNodeConfig; use minotari_wallet_grpc_client::grpc::{Empty, GetIdentityRequest}; -use tari_core::blocks::Block; +use tari_common_types::tari_address::TariAddress; +use tari_core::{blocks::Block, transactions::aggregated_body::AggregateBody}; use tari_integration_tests::{ base_node_process::{spawn_base_node, spawn_base_node_with_config}, get_peer_addresses, @@ -719,6 +733,143 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { } } +#[then(expr = "generate a block with 2 coinbases from node {word}")] +async fn generate_block_with_2_coinbases(world: &mut TariWorld, node: String) { + let mut client = world.get_node_client(&node).await.unwrap(); + + let template_req = NewBlockTemplateRequest { + algo: 
Some(PowAlgo { + pow_algo: PowAlgos::Sha3x.into(), + }), + max_weight: 0, + }; + + let template_response = client.get_new_block_template(template_req).await.unwrap().into_inner(); + + let block_template = template_response.new_block_template.clone().unwrap(); + let miner_data = template_response.miner_data.clone().unwrap(); + let amount = miner_data.reward + miner_data.total_fees; + let request = GetNewBlockWithCoinbasesRequest { + new_template: Some(block_template), + coinbases: vec![ + NewBlockCoinbase { + address: TariAddress::from_hex("30a815df7b8d7f653ce3252f08a21d570b1ac44958cb4d7af0e0ef124f89b11943") + .unwrap() + .to_hex(), + value: amount - 1000, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + NewBlockCoinbase { + address: TariAddress::from_hex("3e596f98f6904f0fc1c8685e2274bd8b2c445d5dac284a9398d09a0e9a760436d0") + .unwrap() + .to_hex(), + value: 1000, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + ], + }; + + let new_block = client.get_new_block_with_coinbases(request).await.unwrap().into_inner(); + + let new_block = new_block.block.unwrap(); + let mut coinbase_kernel_count = 0; + let mut coinbase_utxo_count = 0; + let body: AggregateBody = new_block.body.clone().unwrap().try_into().unwrap(); + for kernel in body.kernels() { + if kernel.is_coinbase() { + coinbase_kernel_count += 1; + } + } + for utxo in body.outputs() { + if utxo.is_coinbase() { + coinbase_utxo_count += 1; + } + } + assert_eq!(coinbase_kernel_count, 1); + assert_eq!(coinbase_utxo_count, 2); + + match client.submit_block(new_block).await { + Ok(_) => (), + Err(e) => panic!("The block should have been valid, {}", e), + } +} + +#[then(expr = "generate a block with 2 coinbases as a single request from node {word}")] +async fn generate_block_with_2_as_single_request_coinbases(world: &mut TariWorld, node: String) { + let mut client = world.get_node_client(&node).await.unwrap(); + + let template_req = 
GetNewBlockTemplateWithCoinbasesRequest { + algo: Some(PowAlgo { + pow_algo: PowAlgos::Sha3x.into(), + }), + max_weight: 0, + coinbases: vec![ + NewBlockCoinbase { + address: TariAddress::from_hex("30a815df7b8d7f653ce3252f08a21d570b1ac44958cb4d7af0e0ef124f89b11943") + .unwrap() + .to_hex(), + value: 1, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + NewBlockCoinbase { + address: TariAddress::from_hex("3e596f98f6904f0fc1c8685e2274bd8b2c445d5dac284a9398d09a0e9a760436d0") + .unwrap() + .to_hex(), + value: 2, + stealth_payment: false, + revealed_value_proof: true, + coinbase_extra: Vec::new(), + }, + ], + }; + let new_block = client + .get_new_block_template_with_coinbases(template_req) + .await + .unwrap() + .into_inner(); + + let new_block = new_block.block.unwrap(); + let mut coinbase_kernel_count = 0; + let mut coinbase_utxo_count = 0; + let body: AggregateBody = new_block.body.clone().unwrap().try_into().unwrap(); + for kernel in body.kernels() { + if kernel.is_coinbase() { + coinbase_kernel_count += 1; + } + } + println!("{}", body); + for utxo in body.outputs() { + if utxo.is_coinbase() { + coinbase_utxo_count += 1; + } + } + assert_eq!(coinbase_kernel_count, 1); + assert_eq!(coinbase_utxo_count, 2); + let mut num_6154266700 = 0; + let mut num_12308533398 = 0; + for output in body.outputs() { + if output.minimum_value_promise.as_u64() == 6154266700 { + num_6154266700 += 1; + } + if output.minimum_value_promise.as_u64() == 12308533398 { + num_12308533398 += 1; + } + } + assert_eq!(num_6154266700, 1); + assert_eq!(num_12308533398, 1); + + match client.submit_block(new_block).await { + Ok(_) => (), + Err(e) => panic!("The block should have been valid, {}", e), + } +} + #[when(expr = "I have a lagging delayed node {word} connected to node {word} with \ blocks_behind_before_considered_lagging {int}")] async fn lagging_delayed_node(world: &mut TariWorld, delayed_node: String, node: String, delay: u64) { diff --git 
a/lints.toml b/lints.toml index a7a7465c02..cb44026f34 100644 --- a/lints.toml +++ b/lints.toml @@ -69,4 +69,5 @@ allow = [ 'clippy::too_many_arguments', # `assert!(!foo(bar))` is misread the majority of the time, while `assert_eq!(foo(bar), false)` is crystal clear 'clippy::bool-assert-comparison', + 'clippy::blocks_in_conditions', ] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ef0b4ecdc2..4b396a926c 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -13,4 +13,4 @@ # - the CI files in .github folder # - the Makefile in base_layer/key_manager/Makefile [toolchain] -channel = "nightly-2023-12-12" +channel = "nightly-2024-02-04" diff --git a/rustfmt.toml b/rustfmt.toml index 3bc22cf400..13868eb0c1 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -10,7 +10,7 @@ imports_layout = "HorizontalVertical" imports_granularity = "Crate" match_block_trailing_comma = true max_width = 120 -newline_style = "Native" +newline_style = "Auto" normalize_comments = true overflow_delimited_expr = true reorder_imports = true diff --git a/scripts/test_in_docker.sh b/scripts/test_in_docker.sh index 8bab931a5f..8717674b52 100755 --- a/scripts/test_in_docker.sh +++ b/scripts/test_in_docker.sh @@ -2,8 +2,8 @@ # Run the Tari test suite locally inside a suitable docker container -IMAGE=quay.io/tarilabs/rust_tari-build-with-deps:nightly-2023-12-12 -TOOLCHAIN_VERSION=nightly-2023-12-12 +IMAGE=quay.io/tarilabs/rust_tari-build-with-deps:nightly-2024-02-04 +TOOLCHAIN_VERSION=nightly-2024-02-04 CONTAINER=tari_test echo "Deleting old container"
TypeURLHeightUpWeb
Compatible
NetworkLast CheckedHistory
+ // + // + // + // http://node.liumin.io:18089 + // 3119644 + // + // + // + // mainnet5 hours ago + // + // + // + // + // + // + //