diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index f55e79c9..f33c31cd 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -12,6 +12,9 @@ jobs: - name: Checkout Repository uses: actions/checkout@v3 + - name: Cache Project + uses: Swatinem/rust-cache@v2 + - name: Run Audit-Check uses: rustsec/audit-check@v1.4.1 with: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 15b575aa..83dd79f5 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -16,9 +16,22 @@ jobs: runs-on: ubuntu-latest steps: + - name: Setup IPFS + uses: ibnesayeed/setup-ipfs@master + with: + run_daemon: true + - name: Checkout Repository uses: actions/checkout@v3 + - name: Cache Project + uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + + - name: Use mold-linker + uses: rui314/setup-mold@v1 + - name: Install Rust Toolchain id: toolchain uses: dtolnay/rust-toolchain@nightly @@ -26,9 +39,6 @@ jobs: - name: Override rust-toolchain.toml run: rustup override set ${{steps.toolchain.outputs.name}} - - name: Cache Project - uses: Swatinem/rust-cache@v2 - - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov diff --git a/.github/workflows/tests_and_checks.yml b/.github/workflows/tests_and_checks.yml index b4662563..796bee8c 100644 --- a/.github/workflows/tests_and_checks.yml +++ b/.github/workflows/tests_and_checks.yml @@ -13,15 +13,16 @@ concurrency: jobs: run-checks: - runs-on: ubuntu-latest strategy: fail-fast: false matrix: + os: [ubuntu-latest, macos-latest] rust-toolchain: - stable - nightly # minimum version - - 1.66 + - 1.67 + runs-on: ${{ matrix.os }} steps: - name: Checkout Repository uses: actions/checkout@v3 @@ -30,6 +31,12 @@ jobs: # https://github.com/Swatinem/rust-cache - name: Cache Project uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + shared-key: check-${{ matrix.rust-toolchain }}-${{ matrix.os }} + + - name: Use mold-linker + 
uses: rui314/setup-mold@v1 - name: Install Rust Toolchain id: toolchain @@ -49,7 +56,7 @@ jobs: # Check for security advisories - name: Check Advisories - if: ${{ matrix.rust-toolchain == 'stable' }} + if: ${{ matrix.rust-toolchain == 'stable' && matrix.os == 'ubuntu-latest' }} uses: EmbarkStudios/cargo-deny-action@v1 with: command: check advisories @@ -57,7 +64,7 @@ jobs: # Audit licenses, unreleased crates, and unexpected duplicate versions. - name: Check Bans, Licenses, and Sources - if: ${{ matrix.rust-toolchain == 'stable' }} + if: ${{ matrix.rust-toolchain == 'stable' && matrix.os == 'ubuntu-latest' }} uses: EmbarkStudios/cargo-deny-action@v1 with: command: check bans licenses sources @@ -68,19 +75,31 @@ jobs: run: cargo build --release run-tests-all-features: - runs-on: ubuntu-latest strategy: fail-fast: false matrix: + os: [ubuntu-latest, macos-latest] rust-toolchain: - stable - nightly + runs-on: ${{ matrix.os }} steps: + - name: Setup IPFS + uses: ibnesayeed/setup-ipfs@master + with: + run_daemon: true + - name: Checkout Repository uses: actions/checkout@v3 - name: Cache Project uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + shared-key: test-all-${{ matrix.rust-toolchain }}-${{ matrix.os }} + + - name: Use mold-linker + uses: rui314/setup-mold@v1 - name: Install Rust Toolchain id: toolchain @@ -101,19 +120,26 @@ jobs: run: cargo test --doc run-tests-no-default-features: - runs-on: ubuntu-latest strategy: fail-fast: false matrix: + os: [ubuntu-latest, macos-latest] rust-toolchain: - stable - nightly + runs-on: ${{ matrix.os }} steps: - name: Checkout Repository uses: actions/checkout@v3 - name: Cache Project uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + shared-key: test-0-${{ matrix.rust-toolchain }}-${{ matrix.os }} + + - name: Use mold-linker + uses: rui314/setup-mold@v1 - name: Install Rust Toolchain id: toolchain @@ -128,7 +154,7 @@ jobs: uses: taiki-e/install-action@nextest - name: Run Tests - run: cargo nextest run 
--profile ci --no-default-features + run: cargo nextest run --profile ci --no-default-features --features "test-utils" run-docs: runs-on: ubuntu-latest @@ -141,6 +167,11 @@ jobs: - name: Cache Project uses: Swatinem/rust-cache@v2 + with: + shared-key: doc + + - name: Use mold-linker + uses: rui314/setup-mold@v1 - name: Install Rust Toolchain uses: dtolnay/rust-toolchain@stable diff --git a/.gitignore b/.gitignore index b7a825ff..bbc8610d 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,8 @@ private *.temp *.db +*.db-shm +*.db-wal *.tmp *.png *.dot @@ -20,6 +22,10 @@ private .DS_Store homestar-guest-wasm/out homestar-wasm/out +**/fixtures/test_* + +# ipfs +.ipfs # daemon homestar.err diff --git a/.ignore b/.ignore index b5fe04d4..3fb39912 100644 --- a/.ignore +++ b/.ignore @@ -15,3 +15,5 @@ LICENSE .gitignore .release-please-manifest.json .pre-commit-config.yaml + +**/fixtures diff --git a/Cargo.lock b/Cargo.lock index af90992e..316e764f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,22 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "ab_glyph" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5110f1c78cf582855d895ecd0746b653db010cec6d9f5575293f27934d980a39" +dependencies = [ + "ab_glyph_rasterizer", + "owned_ttf_parser", +] + +[[package]] +name = "ab_glyph_rasterizer" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046" + [[package]] name = "addr2line" version = "0.19.0" @@ -245,6 +261,12 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-once-cell" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b49bd4c5b769125ea6323601c39815848972880efd33ffb2d01f9f909adc699" + [[package]] name = "async-recursion" version = "1.0.4" @@ -269,9 +291,9 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" dependencies = [ "bytes", "futures-sink", @@ -408,25 +430,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bincode" -version = "2.0.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11ea1a0346b94ef188834a65c068a03aec181c94896d481d7a0a40d85b0ce95" -dependencies = [ - "bincode_derive", - "serde", -] - -[[package]] -name = "bincode_derive" -version = "2.0.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30759b3b99a1b802a7a3aa21c85c3ded5c28e1c83170d82d70f08bbf7f3e4c" -dependencies = [ - "virtue", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -595,7 +598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" 
dependencies = [ "memchr", - "regex-automata 0.3.2", + "regex-automata 0.3.4", "serde", ] @@ -659,6 +662,9 @@ name = "bytes" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +dependencies = [ + "serde", +] [[package]] name = "cap-fs-ext" @@ -740,11 +746,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "6c6b2562119bf28c3439f7f02db99faf0aa1a8cdfe5772a2ee155d32227239f0" dependencies = [ "jobserver", + "libc", ] [[package]] @@ -786,8 +793,11 @@ checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", "serde", + "time 0.1.45", + "wasm-bindgen", "winapi", ] @@ -924,12 +934,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "concat-in-place" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b80dba65d26e0c4b692ad0312b837f1177e8175031af57fd1de4f3bc36b430" - [[package]] name = "concurrent-queue" version = "2.2.0" @@ -997,9 +1001,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6340df57935414636969091153f35f68d9f00bbc8fb4a9c6054706c213e6c6bc" +checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" [[package]] name = "constant_time_eq" @@ -1058,18 +1062,18 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c289b8eac3a97329a524e953b5fd68a8416ca629e1a37287f12d9e0760aadbc" +checksum = "1380172556902242d32f78ed08c98aac4f5952aef22d3684aed5c66a5db0a6fc" 
dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf07ba80f53fa7f7dc97b11087ea867f7ae4621cfca21a909eca92c0b96c7d9" +checksum = "037cca234e1ad0766fdfe43b527ec14e100414b4ccf4bb614977aa9754958f57" dependencies = [ "bumpalo", "cranelift-bforest", @@ -1088,42 +1092,42 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a7ca088173130c5c033e944756e3e441fbf3f637f32b4f6eb70252580c6dd4" +checksum = "d375e6afa8b9a304999ea8cf58424414b8e55e004571265a4f0826eba8b74f18" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0114095ec7d2fbd658ed100bd007006360bc2530f57c6eee3d3838869140dbf9" +checksum = "ca590e72ccb8da963def6e36460cce4412032b1f03c31d1a601838d305abdc39" [[package]] name = "cranelift-control" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d56031683a55a949977e756d21826eb17a1f346143a1badc0e120a15615cd38" +checksum = "9d2d38eea4373639f4b6236a40f69820fed16c5511093cd3783bf8491a93d9cf" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6565198b5684367371e2b946ceca721eb36965e75e3592fad12fc2e15f65d7b" +checksum = "5e3173c1434af23c00e4964722cf93ca8f0e6287289bf5d52110597c3ba2ea09" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f28cc44847c8b98cb921e6bfc0f7b228f4d27519376fea724d181da91709a6" +checksum = 
"aec4a3a33825062eccf6eec73e852c8773220f6e4798925e19696562948beb1f" dependencies = [ "cranelift-codegen", "log", @@ -1133,15 +1137,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b658177e72178c438f7de5d6645c56d97af38e17fcb0b500459007b4e05cc5" +checksum = "5146b5cea4b21095a021d964b0174cf6ff5530f83e8d0a822683c7559e360b66" [[package]] name = "cranelift-native" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf1c7de7221e6afcc5e13ced3b218faab3bc65b47eac67400046a05418aecd6a" +checksum = "21cec3717ce554d3936b2101aa8eae1a2a410bd6da0f4df698a4b008fe9cf1e9" dependencies = [ "cranelift-codegen", "libc", @@ -1150,9 +1154,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.97.1" +version = "0.98.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76b0d28ebe8edb6b503630c489aa4669f1e2d13b97bec7271a0fcb0e159be3ad" +checksum = "d7fd2f9f1bf29ce6639ae2f477a2fe20bad0bd09289df13efeb890e8e4b9f807" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1361,9 +1365,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ "darling_core", "darling_macro", @@ -1371,9 +1375,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", @@ -1385,9 +1389,9 @@ dependencies = [ 
[[package]] name = "darling_macro" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", @@ -1453,16 +1457,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +dependencies = [ + "serde", +] + [[package]] name = "diesel" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7a532c1f99a0f596f6960a60d1e119e91582b24b39e2d83a190e61262c3ef0c" dependencies = [ + "chrono", "diesel_derives", "libsqlite3-sys", "r2d2", - "time", + "time 0.3.25", ] [[package]] @@ -1590,9 +1604,15 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dtoa" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "519b83cd10f5f6e969625a409f735182bea5558cd8b64c655806ceaae36f1999" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + +[[package]] +name = "ecolor" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e479a7fa3f23d4e794f8b2f8b3568dd4e47886ad1b12c9c095e141cb591eb63" [[package]] name = "ed25519" @@ -1629,11 +1649,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "egui" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3aef8ec3ae1b772f340170c65bf27d5b8c28f543a0116c844d2ac08d01123e7" +dependencies = [ + "ahash 0.8.3", + "epaint", + "nohash-hasher", +] + [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "emath" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "3857d743a6e0741cdd60b622a74c7a36ea75f5f8f11b793b41d905d2c9721a4b" [[package]] name = "encoding_rs" @@ -1705,17 +1742,32 @@ dependencies = [ "termcolor", ] +[[package]] +name = "epaint" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09333964d4d57f40a85338ba3ca5ed4716070ab184dcfed966b35491c5c64f3b" +dependencies = [ + "ab_glyph", + "ahash 0.8.3", + "atomic_refcell", + "ecolor", + "emath", + "nohash-hasher", + "parking_lot", +] + [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -1738,17 +1790,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "evmap" -version = "10.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3ea06a83f97d3dc2eb06e51e7a729b418f0717a5558a5c870e3d5156dc558d" -dependencies = [ - "hashbag", - "slab", - "smallvec", -] - [[package]] name = "exr" version = "1.7.0" @@ -1780,6 +1821,12 @@ dependencies = [ "instant", ] +[[package]] +name = "fastrand" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" + [[package]] name = "fd-lock" version = "4.0.0" @@ -1787,7 +1834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b0377f1edc77dbd1118507bc7a66e4ab64d2b90c66f90726dc801e73a8c68f9" dependencies = [ "cfg-if", - "rustix 0.38.3", + "rustix 0.38.6", "windows-sys", ] @@ -1885,7 +1932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d167b646a876ba8fda6b50ac645cfd96242553cbaf0ca4fccaa39afcbf0801f" dependencies = [ "io-lifetimes 1.0.11", - "rustix 0.38.3", + "rustix 0.38.6", "windows-sys", ] @@ -1950,7 +1997,7 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ - "fastrand", + "fastrand 1.9.0", "futures-core", "futures-io", "memchr", @@ -2150,12 +2197,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbag" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d44c238cb72d3e8993a30c32e97b2b2c2c1a12388603f28c4f19a44c4396bc" - [[package]] name = "hashbrown" version = "0.12.3" @@ -2311,11 +2352,11 @@ dependencies = [ "anyhow", "assert_cmd", "async-trait", + "atomic_refcell", "axum", - "bincode 2.0.0-rc.3", "byte-unit", + "chrono", "clap", - "concat-in-place", "config", "console-subscriber", "criterion", @@ -2327,17 +2368,19 @@ dependencies = [ "diesel_migrations", "dotenvy", "enum-assoc", - "evmap", "fnv", "futures", "headers", "homestar-core", + "homestar-runtime-tests-proc-macro", "homestar-wasm", "http", "http-serde", + "humantime", "indexmap 2.0.0", "ipfs-api", "ipfs-api-backend-hyper", + "itertools 0.11.0", "json", "libipld", "libp2p", @@ -2348,6 +2391,8 @@ dependencies = [ "openssl", "predicates", "proptest", + "puffin", + "puffin_egui", "rand 0.8.5", "reqwest", "retry", @@ -2363,7 +2408,9 
@@ dependencies = [ "tabled", "tarpc", "thiserror", + "time 0.3.25", "tokio", + "tokio-serde", "tokio-tungstenite 0.20.0", "tokio-util", "tracing", @@ -2373,6 +2420,16 @@ dependencies = [ "tryhard", "url", "wait-timeout", + "wnfs-common", +] + +[[package]] +name = "homestar-runtime-tests-proc-macro" +version = "0.0.0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", ] [[package]] @@ -2386,7 +2443,6 @@ dependencies = [ "enum-as-inner 0.6.0", "heck", "homestar-core", - "image", "itertools 0.11.0", "libipld", "rust_decimal", @@ -2398,10 +2454,10 @@ dependencies = [ "wasi-common", "wasmparser 0.110.0", "wasmtime", - "wasmtime-component-util 11.0.1", + "wasmtime-component-util", "wasmtime-wasi", "wat", - "wit-component 0.13.0", + "wit-component 0.13.1", ] [[package]] @@ -2769,7 +2825,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.3", + "rustix 0.38.6", "windows-sys", ] @@ -2799,15 +2855,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "ittapi" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e648c437172ce7d3ac35ca11a068755072054826fa455a916b43524fa4a62a7" +checksum = "41e0d0b7b3b53d92a7e8b80ede3400112a6b8b4c98d1f5b8b16bb787c780582c" dependencies = [ "anyhow", "ittapi-sys", @@ -2816,9 +2872,9 @@ dependencies = [ [[package]] name = "ittapi-sys" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9b32a4d23f72548178dde54f3c12c6b6a08598e25575c0d0fa5bd861e0dc1a5" +checksum = 
"f2f8763c96e54e6d6a0dccc2990d8b5e33e3313aaeae6185921a3f4c1614a77c" dependencies = [ "cc", ] @@ -3047,9 +3103,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d45dd90e8f0e1fa59e85ff5316dd4d1ac41a9a507e79cda1b0e9b7be43ad1a56" +checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ "libp2p-core", "libp2p-identity", @@ -3156,9 +3212,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2874d9c6575f1d7a151022af5c42bb0ffdcdfbafe0a6fd039de870b384835a2" +checksum = "a38d6012784fe4cc14e6d443eb415b11fc7c456dc15d9f0d90d9b70bc7ac3ec1" dependencies = [ "asn1_der", "bs58", @@ -3175,9 +3231,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.44.1" +version = "0.44.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5588b884dcb1dadc04e49de342f634f60cf28b6beaaca5a4fe3dd1f09bb30041" +checksum = "4f2584b0c27f879a1cca4b753fd96874109e5a2f46bd6e30924096456c2ba9b2" dependencies = [ "arrayvec", "asynchronous-codec", @@ -3286,9 +3342,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.43.0" +version = "0.43.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6f1fe3817492f88c5298c8b5fbaa5ff3a0c802ecf4e79be4e341cf07abfa82f" +checksum = "43106820057e0f65c77b01a3873593f66e676da4e40c70c3a809b239109f1d30" dependencies = [ "either", "fnv", @@ -3443,9 +3499,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = 
"57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "lock_api" @@ -3481,6 +3537,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "lz4_flex" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b8c72594ac26bfd34f2d99dfced2edfaddfe8a476e3ff2ca0eb293d925c4f83" + [[package]] name = "mach" version = "0.3.2" @@ -3513,9 +3575,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +checksum = "67827e6ea8ee8a7c4a72227ef4fc08957040acffdb5f122733b24fa12daff41b" [[package]] name = "maybe-owned" @@ -3824,6 +3886,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "natord" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "308d96db8debc727c3fd9744aac51751243420e46edf401010908da7f8d5e57c" + [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -3990,9 +4058,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", "libm 0.2.7", @@ -4081,9 +4149,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.26.0+1.1.1u" +version = "111.27.0+1.1.1v" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc62c9f12b22b8f5208c23a7200a442b2e5999f8bdf80233852122b5a4f6f37" +checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" dependencies = [ "cc", ] @@ -4160,6 +4228,15 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owned_ttf_parser" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "706de7e2214113d63a8238d1910463cfce781129a6f263d13fdb09ff64355ba4" +dependencies = [ + "ttf-parser", +] + [[package]] name = "owo-colors" version = "3.5.0" @@ -4218,9 +4295,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pathdiff" @@ -4245,9 +4322,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73935e4d55e2abf7f130186537b19e7a4abc886a0252380b59248af473a3fc9" +checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" dependencies = [ "thiserror", "ucd-trie", @@ -4255,9 +4332,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef623c9bbfa0eedf5a0efba11a5ee83209c326653ca31ff019bec3a95bfff2b" +checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" dependencies = [ "pest", "pest_generator", @@ -4265,9 +4342,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e8cba4ec22bada7fc55ffe51e2deb6a0e0db2d0b7ab0b103acc80d2510c190" +checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", @@ -4278,9 +4355,9 @@ 
dependencies = [ [[package]] name = "pest_meta" -version = "2.7.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01f71cb40bd8bb94232df14b946909e14660e33fc05db3e50ae2a82d7ea0ca0" +checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" dependencies = [ "once_cell", "pest", @@ -4520,18 +4597,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c2f43e8969d51935d2a7284878ae053ba30034cd563f673cde37ba5205685e" +checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", @@ -4631,6 +4708,39 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "puffin" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76425abd4e1a0ad4bd6995dd974b52f414fca9974171df8e3708b3e660d05a21" +dependencies = [ + "anyhow", + "bincode", + "byteorder", + "cfg-if", + "instant", + "lz4_flex", + "once_cell", + "parking_lot", + "serde", +] + +[[package]] +name = "puffin_egui" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00f0ef87ac588ec9a979ea4952042134ff047407436aa6859ba9e061f55ca55d" +dependencies = [ + "egui", + "indexmap 1.9.3", + "instant", + "natord", + "once_cell", + "puffin", + "time 0.3.25", + "vec1", +] + [[package]] name = "pulldown-cmark" version = "0.8.0" @@ -4703,9 +4813,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.29" +version = "1.0.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -4860,9 +4970,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12513beb38dd35aab3ac5f5b89fd0330159a0dc21d5309d75073011bbc8032b0" +checksum = "5b4dcbd3a2ae7fb94b5813fa0e957c6ab51bf5d0a8ee1b69e0c2d0f1e6eb8485" dependencies = [ "hashbrown 0.13.2", "log", @@ -4879,8 +4989,8 @@ checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.2", - "regex-syntax 0.7.3", + "regex-automata 0.3.4", + "regex-syntax 0.7.4", ] [[package]] @@ -4894,13 +5004,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" +checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.3", + "regex-syntax 0.7.4", ] [[package]] @@ -4911,9 +5021,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "rend" @@ -5023,6 +5133,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rmp" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" 
+dependencies = [ + "byteorder", + "num-traits", + "paste", +] + +[[package]] +name = "rmp-serde" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723ecff9ad04f4ad92fe1c8ca6c20d2196d9286e9c60727c4cb5511629260e9d" +dependencies = [ + "byteorder", + "rmp", + "serde", +] + [[package]] name = "ron" version = "0.7.1" @@ -5115,14 +5247,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "1ee020b1716f0a80e2ace9b03441a749e402e86712f15f16fe8a8f75afac732f" dependencies = [ "bitflags 2.3.3", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.5", "windows-sys", ] @@ -5150,9 +5282,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "rusty-fork" @@ -5179,9 +5311,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -5212,9 +5344,9 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -5247,9 +5379,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.1" 
+version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -5260,9 +5392,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -5294,9 +5426,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a16be4fe5320ade08736447e3198294a5ea9a6d44dde6f35f0a5e06859c427a" +checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" dependencies = [ "serde", ] @@ -5336,9 +5468,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c" dependencies = [ "itoa", "ryu", @@ -5347,9 +5479,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -5389,7 +5521,7 @@ dependencies = [ "serde", "serde_json", "serde_with_macros", - "time", + "time 0.3.25", ] [[package]] @@ -5528,9 +5660,9 @@ checksum = 
"5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" [[package]] name = "simd-adler32" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "simdutf8" @@ -5567,9 +5699,9 @@ checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" [[package]] name = "snafu" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0656e7e3ffb70f6c39b3c2a86332bb74aa3c679da781642590f3c1118c5045" +checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" dependencies = [ "doc-comment", "snafu-derive", @@ -5577,9 +5709,9 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" +checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" dependencies = [ "heck", "proc-macro2", @@ -5883,7 +6015,7 @@ dependencies = [ "cap-std", "fd-lock", "io-lifetimes 2.0.2", - "rustix 0.38.3", + "rustix 0.38.6", "windows-sys", "winx 0.36.1", ] @@ -5891,7 +6023,8 @@ dependencies = [ [[package]] name = "tabled" version = "0.13.0" -source = "git+https://github.com/zhiburt/tabled.git#3ea7f5d84fbd4a18ca9c650dee00ed419a1f114e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d38d39c754ae037a9bc3ca1580a985db7371cd14f1229172d1db9093feb6739" dependencies = [ "papergrid", "tabled_derive", @@ -5919,9 +6052,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.8" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1b1c7f239eb94671427157bd93b3694320f3668d4e1eff08c7285366fd777fac" +checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tarpc" @@ -5960,15 +6093,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" +checksum = "5486094ee78b2e5038a6382ed7645bc084dc2ec433426ca4c3cb61e2007b8998" dependencies = [ - "autocfg", "cfg-if", - "fastrand", + "fastrand 2.0.0", "redox_syscall 0.3.5", - "rustix 0.37.23", + "rustix 0.38.6", "windows-sys", ] @@ -6051,10 +6183,22 @@ dependencies = [ [[package]] name = "time" -version = "0.3.22" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "time" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" +dependencies = [ + "deranged", "itoa", "serde", "time-core", @@ -6069,9 +6213,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -6158,12 +6302,13 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" dependencies = [ - "bincode 1.3.3", + "bincode", "bytes", 
"educe", "futures-core", "futures-sink", "pin-project", + "rmp-serde", "serde", ] @@ -6249,9 +6394,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.12" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -6340,7 +6485,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "time", + "time 0.3.25", "tracing-subscriber", ] @@ -6382,7 +6527,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce2fb2783ed7727b30a78ebecb49d59c98102b1f384105aa27d632487875a67b" dependencies = [ - "time", + "time 0.3.25", "tracing", "tracing-core", "tracing-subscriber", @@ -6483,6 +6628,12 @@ dependencies = [ "tokio", ] +[[package]] +name = "ttf-parser" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a464a4b34948a5f67fddd2b823c62d9d92e44be75058b99939eae6c5b6960b33" + [[package]] name = "tungstenite" version = "0.19.0" @@ -6606,19 +6757,15 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-linebreak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5faade31a542b8b35855fff6e8def199853b2da8da256da52f52f1316ee3137" -dependencies = [ - "hashbrown 0.12.3", - "regex", -] +checksum = 
"3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" @@ -6726,16 +6873,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] -name = "version_check" -version = "0.9.4" +name = "vec1" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "2bda7c41ca331fe9a1c278a9e7ee055f4be7f5eb1c2b72f079b4ff8b5fce9d5c" [[package]] -name = "virtue" -version = "0.0.13" +name = "version_check" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcc60c0624df774c82a0ef104151231d37da4962957d691c011c852b2473314" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -6783,6 +6930,12 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6791,9 +6944,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi-cap-std-sync" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291862f1014dd7e674f93b263d57399de4dd1907ea37e74cf7d36454536ba2f0" +checksum = "dc0fb9a3b1143c8f549b64d707aef869d134fb681f17fb316f0d796537b670ef" dependencies = [ "anyhow", "async-trait", @@ -6815,9 +6968,9 @@ dependencies = [ [[package]] name = "wasi-common" -version = "10.0.1" +version = "11.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b422ae2403cae9ca603864272a402cf5001dd6fef8632e090e00c4fb475741b" +checksum = "41512a0523d86be06d7cf606e1bafd0238948b237ce832179f85dfdbce217e1a" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -6835,9 +6988,9 @@ dependencies = [ [[package]] name = "wasi-tokio" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92108a97e839351fb6aa7462f9d8757a123fa90e84769cb9d72d1eac57e41ea7" +checksum = "b4fea3f0a9656ecc6599dd27c6f0ac02dd61b7753cb58ad4327eaac9b0988c65" dependencies = [ "anyhow", "cap-std", @@ -6960,9 +7113,9 @@ dependencies = [ [[package]] name = "wasm-metadata" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb85334398ff45293a2363baf031c959e19c4e1a95929aacefdceb95c1bc7f60" +checksum = "be44e148f09a188971ec512250b3ae136029e2df586dd740586ce76a17ee657d" dependencies = [ "anyhow", "indexmap 2.0.0", @@ -6970,7 +7123,7 @@ dependencies = [ "serde_json", "spdx", "wasm-encoder 0.31.1", - "wasmparser 0.109.0", + "wasmparser 0.110.0", ] [[package]] @@ -6993,16 +7146,6 @@ dependencies = [ "semver", ] -[[package]] -name = "wasmparser" -version = "0.109.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf9564f29de2890ee34406af52d2a92dec6ef044c8ddfc5add5db8dcfd36e6c" -dependencies = [ - "indexmap 2.0.0", - "semver", -] - [[package]] name = "wasmparser" version = "0.110.0" @@ -7015,23 +7158,23 @@ dependencies = [ [[package]] name = "wasmprinter" -version = "0.2.59" +version = "0.2.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc960b30b84abca377768f3c62cff3a1c74db8c0f6759ed581827da0bd3a3fed" +checksum = "42cd12ed4d96a984e4b598a17457f1126d01640cc7461afbb319642111ff9e7f" dependencies = [ "anyhow", - "wasmparser 0.107.0", + "wasmparser 0.110.0", ] [[package]] name = "wasmtime" -version = "10.0.1" +version = 
"11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd02b992d828b91efaf2a7499b21205fe4ab3002e401e3fe0f227aaeb4001d93" +checksum = "0b1f817f2ca5070983c71f1205fbab5848c9073df7f4e1af9fdceb4cc4a1b8e5" dependencies = [ "anyhow", "async-trait", - "bincode 1.3.3", + "bincode", "bumpalo", "cfg-if", "encoding_rs", @@ -7050,7 +7193,7 @@ dependencies = [ "wasmparser 0.107.0", "wasmtime-cache", "wasmtime-component-macro", - "wasmtime-component-util 10.0.1", + "wasmtime-component-util", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-fiber", @@ -7063,22 +7206,22 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284466ef356ce2d909bc0ad470b60c4d0df5df2de9084457e118131b3c779b92" +checksum = "0f82fbfda4610e9225238c62574ecded8e9d6ad3a12f387ac45819ecad5c3f9b" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efc78cfe1a758d1336f447a47af6ec05e0df2c03c93440d70faf80e17fbb001e" +checksum = "b4f5b87f1ed383d6c219c04467ab6ae87990d6c2815d5a990138990a7fcbab95" dependencies = [ "anyhow", "base64 0.21.2", - "bincode 1.3.3", + "bincode", "directories-next", "file-per-thread-logger", "log", @@ -7092,25 +7235,19 @@ dependencies = [ [[package]] name = "wasmtime-component-macro" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e916103436a6d84faa4c2083e2e98612a323c2cc6147ec419124f67c764c9c" +checksum = "e27b96c540c78e12b60025fcbc0ba8a55bff1b32885a5e8eae2df765a6bc97ac" dependencies = [ "anyhow", "proc-macro2", "quote", "syn 1.0.109", - "wasmtime-component-util 10.0.1", + "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser 0.8.0", ] -[[package]] -name = "wasmtime-component-util" -version = "10.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f20a5135ec5ef01080e674979b02d6fa5eebaa2b0c2d6660513ee9956a1bf624" - [[package]] name = "wasmtime-component-util" version = "11.0.1" @@ -7119,9 +7256,9 @@ checksum = "0928fe66c22bf8887e2fb524b7647308b8ce836a333af8504e4f1d80b8ea849f" [[package]] name = "wasmtime-cranelift" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1aa99cbf3f8edb5ad8408ba380f5ab481528ecd8a5053acf758e006d6727fd" +checksum = "b659f6e58662d1131f250339acd03aa49377f9351474282699985b79ca4d4a7c" dependencies = [ "anyhow", "cranelift-codegen", @@ -7142,9 +7279,9 @@ dependencies = [ [[package]] name = "wasmtime-cranelift-shared" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce31fd55978601acc103acbb8a26f81c89a6eae12d3a1c59f34151dfa609484" +checksum = "74171de083bf2ecb716c507900f825e2b858346c714fbf48f4763ea760f998a8" dependencies = [ "anyhow", "cranelift-codegen", @@ -7158,9 +7295,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f9e58e0ee7d43ff13e75375c726b16bce022db798d3a099a65eeaa7d7a544b" +checksum = "b124cbac1a3e04a744c76b3f77919343ef16dc4c818a2406dd7b689b16a54639" dependencies = [ "anyhow", "cranelift-entity", @@ -7174,15 +7311,15 @@ dependencies = [ "wasm-encoder 0.29.0", "wasmparser 0.107.0", "wasmprinter", - "wasmtime-component-util 10.0.1", + "wasmtime-component-util", "wasmtime-types", ] [[package]] name = "wasmtime-fiber" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14309cbdf2c395258b124a24757c727403070c0465a28bcc780c4f82f4bca5ff" +checksum = "f92ffb8869395c63100ffefbd71cf9489e7e9218e63a3798dcfe93fa8945f9cf" dependencies = [ "cc", "cfg-if", @@ -7193,13 +7330,13 @@ dependencies = [ 
[[package]] name = "wasmtime-jit" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0f2eaeb01bb67266416507829bd8e0bb60278444e4cbd048e280833ebeaa02" +checksum = "90ff15f426c2378f32ffb6d9b4370e3504231492e93f6968e8b5102c3256bbc4" dependencies = [ "addr2line 0.19.0", "anyhow", - "bincode 1.3.3", + "bincode", "cfg-if", "cpp_demangle", "gimli", @@ -7219,9 +7356,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42e59d62542bfb73ce30672db7eaf4084a60b434b688ac4f05b287d497de082" +checksum = "c549e219102426aa1f90bd18e56a3195ed1e696c318abb3f501c1f4924b530ac" dependencies = [ "object 0.30.4", "once_cell", @@ -7230,9 +7367,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-icache-coherence" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b49ceb7e2105a8ebe5614d7bbab6f6ef137a284e371633af60b34925493081f" +checksum = "1cf02fedda287a409cff80ad40a7c6c0f0771e99b0cd5e2b79d9cb7ecdc1b2f4" dependencies = [ "cfg-if", "libc", @@ -7241,9 +7378,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a5de4762421b0b2b19e02111ca403632852b53e506e03b4b227ffb0fbfa63c2" +checksum = "fc38c6229a5d3b8a2528eb33eb11d3e7ebf570259c7cd2f01e8668fe783ea443" dependencies = [ "anyhow", "cc", @@ -7268,9 +7405,9 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbb7c138f797192f46afdd3ec16f85ef007c3bb45fa8e5174031f17b0be4c4a" +checksum = "768f6c5e7afc3a02eff2753196741db8e5ac5faf26a1e2204d7341b30a637c6f" dependencies = [ "cranelift-entity", "serde", @@ -7280,9 +7417,9 @@ dependencies = [ [[package]] 
name = "wasmtime-wasi" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01686e859249d4dffe3d7ce9957ae35bcf4161709dfafd165ee136bd54d179f1" +checksum = "ff7bb52cc5f9f3878cb012c5e42296e2fbb96e5407301b1e8e7007deec8dca9c" dependencies = [ "anyhow", "async-trait", @@ -7308,9 +7445,9 @@ dependencies = [ [[package]] name = "wasmtime-winch" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60160d8f7d2b301790730dac8ff25156c61d4fed79481e7074c21dd1283cfe2f" +checksum = "b2249faeb887b8b7e7b1797c460ac76160654aea3b8d5842093a771d77fc3819" dependencies = [ "anyhow", "cranelift-codegen", @@ -7325,9 +7462,9 @@ dependencies = [ [[package]] name = "wasmtime-wit-bindgen" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3334b0466a4d340de345cda83474d1d2c429770c3d667877971407672bc618a" +checksum = "84a4a005a6a2d5faa7cd953d389da8ae979cb571fe40edec7769649d8c98d874" dependencies = [ "anyhow", "heck", @@ -7407,9 +7544,9 @@ checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "wiggle" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea93d31f59f2b2fa4196990b684771500072d385eaac12587c63db2bc185d705" +checksum = "a89f0d9c91096db5e250cb803500bddfdd65ae3268a9e09283b75d3b513ede7a" dependencies = [ "anyhow", "async-trait", @@ -7422,9 +7559,9 @@ dependencies = [ [[package]] name = "wiggle-generate" -version = "10.0.1" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df96ee6bea595fabf0346c08c553f684b08e88fad6fdb125e6efde047024f7b" +checksum = "12b5552356799612587de885e02b7e7e7d39e41657af1ddb985d18fbe5ac1642" dependencies = [ "anyhow", "heck", @@ -7437,9 +7574,9 @@ dependencies = [ [[package]] name = "wiggle-macro" -version = "10.0.1" 
+version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8649011a011ecca6197c4db6ee630735062ba20595ea56ce58529b3b1c20aa2f" +checksum = "2ca58f5cfecefaec28b09bfb6197a52dbd24df4656154bd377a166f1031d9b17" dependencies = [ "proc-macro2", "quote", @@ -7480,9 +7617,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winch-codegen" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525fdd0d4e82d1bd3083bd87e8ca8014abfbdc5bf290d1d5371dac440d351e89" +checksum = "21de111a36e8f367416862fdf6f10caa411cc07a6e21b614eedbf9388c2a3dc9" dependencies = [ "anyhow", "cranelift-codegen", @@ -7614,9 +7751,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.9" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a2094c43cc94775293eaa0e499fbc30048a6d824ac82c0351a8c0bf9112529" +checksum = "f46aab759304e4d7b2075a9aecba26228bb073ee8c50db796b2c72c676b5d807" dependencies = [ "memchr", ] @@ -7688,7 +7825,7 @@ checksum = "c9658ec54d4a3c9e2f079bc65a131093337595b595fbf82f805008469838cdea" dependencies = [ "anyhow", "wit-component 0.12.0", - "wit-parser 0.9.1", + "wit-parser 0.9.2", ] [[package]] @@ -7742,23 +7879,23 @@ dependencies = [ "wasm-encoder 0.30.0", "wasm-metadata 0.9.0", "wasmparser 0.108.0", - "wit-parser 0.9.1", + "wit-parser 0.9.2", ] [[package]] name = "wit-component" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2ba1f13b1762d1995046790e18f283b38da19ff82505283d97a29b3914c8b5" +checksum = "6d843f4dfead0d465b09e8bfba4d3dcb1a1bcc857f87917d348c7fa401158bc5" dependencies = [ "anyhow", "bitflags 2.3.3", "indexmap 2.0.0", "log", "wasm-encoder 0.31.1", - "wasm-metadata 0.10.0", - "wasmparser 0.109.0", - "wit-parser 0.9.1", + "wasm-metadata 0.10.1", 
+ "wasmparser 0.110.0", + "wit-parser 0.9.2", ] [[package]] @@ -7779,9 +7916,9 @@ dependencies = [ [[package]] name = "wit-parser" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b5016f1dc64a1e7ffae43731381549b7846904e3ae9765fac464c4884caa94" +checksum = "541efa2046e544de53a9da1e2f6299e63079840360c9e106f1f8275a97771318" dependencies = [ "anyhow", "id-arena", @@ -7805,6 +7942,26 @@ dependencies = [ "wast 35.0.2", ] +[[package]] +name = "wnfs-common" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfcb4584f3866ead49adae8c05cec6f633139d19283448aa7807280612e24b7" +dependencies = [ + "anyhow", + "async-once-cell", + "async-trait", + "bytes", + "chrono", + "futures", + "libipld", + "multihash 0.18.1", + "once_cell", + "rand_core 0.6.4", + "serde", + "thiserror", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index d080e4a7..22fed4e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,23 +14,29 @@ authors = [ ] edition = "2021" license = "Apache" -rust-version = "1.66.0" +rust-version = "1.67.0" [workspace.dependencies] anyhow = { version = "1.0", features = ["backtrace"] } async-trait = "0.1" +atomic_refcell = "0.1" byte-unit = { version = "4.0", default-features = false } +chrono = "0.4" enum-assoc = " 1.1" enum-as-inner = "0.6" futures = "0.3" +humantime = "2.1" +itertools = "0.11" libipld = { version = "0.16", features = ["serde-codec"] } rand = "0.8" serde_ipld_dagcbor = "0.3" thiserror = "1.0" +time = { version = "0.3", features = ["serde"] } tokio = { version = "1.29", features = ["fs", "io-util", "io-std", "macros", "rt", "rt-multi-thread", "signal", "sync", "tracing"] } tracing = "0.1" ucan = "0.4" +ucan-key-support = "0.1" # Speedup build on macOS # See https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#splitting-debug-information diff --git a/README.md b/README.md index 0d570bf5..53330d60 100644 --- 
a/README.md +++ b/README.md @@ -228,6 +228,7 @@ We would be happy to try to answer your question or try opening a new issue on G - [What Is An IPVM][ipvm-wg] - [IPVM: High-Level Spec][ipvm-spec] +- [Contributing Research]: [research] - [Seamless Services for an Open World][seamless-services] by Brooklyn Zelenka - [Foundations for Open-World Compute][foundations-for-openworld-compute] by Zeeshan Lakhani - [IPVM: The Long-Fabled Execution Layer][cod-ipvm] by Brooklyn Zelenka @@ -271,6 +272,7 @@ conditions. [nix]:https://nixos.org/download.html [nix-flake]: https://nixos.wiki/wiki/Flakes [pre-commit]: https://pre-commit.com/ +[research]: https://github.com/ipvm-wg/research [seamless-services]: https://youtu.be/Kr3B3sXh_VA [ucan-invocation]: https://github.com/ucan-wg/invocation [wasm-component]: https://github.com/WebAssembly/component-model diff --git a/deny.toml b/deny.toml index 2d1fff76..ea60c5b8 100644 --- a/deny.toml +++ b/deny.toml @@ -77,8 +77,10 @@ allow = [ "BSD-3-Clause", "CC0-1.0", "ISC", + "LicenseRef-UFL-1.0", "MIT", "MPL-2.0", + "OFL-1.1", "Zlib" ] # List of explicitly disallowed licenses @@ -195,7 +197,6 @@ allow-registry = ["https://github.com/rust-lang/crates.io-index"] # List of URLs for allowed Git repositories allow-git = [ "https://github.com/zhiburt/tabled.git", - "https://github.com/ucan-wg/rs-ucan", "https://github.com/bytecodealliance/preview2-prototyping", "https://github.com/bytecodealliance/wasmtime", "https://github.com/bytecodealliance/wit-bindgen", diff --git a/flake.lock b/flake.lock index 5c4524f0..e6c61c98 100644 --- a/flake.lock +++ b/flake.lock @@ -21,11 +21,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1687709756, - "narHash": "sha256-Y5wKlQSkgEK2weWdOu4J3riRd+kV/VCgHsqLNTTWQ/0=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", "owner": "numtide", "repo": "flake-utils", - "rev": "dbabf0ca0c0c4bce6ea5eaf65af5cb694d2082c7", + "rev": 
"919d646de7be200f3bf08cb76ae1f09402b6f9b4", "type": "github" }, "original": { @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1688956120, - "narHash": "sha256-7geHGr2aLpQvwGgaZlTLPHMVFxvFzAuB35mZYsKgLpQ=", + "lastModified": 1691003216, + "narHash": "sha256-Qq/MPkhS12Bl0X060pPvX3v9ac3f2rRQfHjjozPh/Qs=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2169d3b0bce0daa64d05abbdf9da552a7b8c22a7", + "rev": "4a56ce9727a0c5478a836a0d8a8f641c5b9a3d5f", "type": "github" }, "original": { @@ -68,11 +68,11 @@ ] }, "locked": { - "lastModified": 1688956505, - "narHash": "sha256-6sa19mHTkdOi867lIolhpiS20trMdo0unk5/37859X4=", + "lastModified": 1691029059, + "narHash": "sha256-QwVeE9YTgH3LmL7yw2V/hgswL6yorIvYSp4YGI8lZYM=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "4acc04c26df84e0a718c3efe4e13021222d23b28", + "rev": "99df4908445be37ddb2d332580365fce512a7dcf", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 4ce6725c..71bc5f2e 100644 --- a/flake.nix +++ b/flake.nix @@ -48,6 +48,7 @@ cargo-nextest cargo-outdated cargo-sort + cargo-spellcheck cargo-udeps cargo-watch twiggy @@ -55,6 +56,7 @@ ]; ci = pkgs.writeScriptBin "ci" '' + #!${pkgs.stdenv.shell} cargo fmt --check cargo clippy cargo build --release @@ -63,95 +65,114 @@ ''; db = pkgs.writeScriptBin "db" '' + #!${pkgs.stdenv.shell} diesel setup diesel migration run ''; dbReset = pkgs.writeScriptBin "db-reset" '' + #!${pkgs.stdenv.shell} diesel database reset diesel setup diesel migration run ''; doc = pkgs.writeScriptBin "doc" '' + #!${pkgs.stdenv.shell} cargo doc --no-deps --document-private-items --open ''; docAll = pkgs.writeScriptBin "doc-all" '' + #!${pkgs.stdenv.shell} cargo doc --document-private-items --open ''; compileWasm = pkgs.writeScriptBin "compile-wasm" '' + #!${pkgs.stdenv.shell} cargo build -p homestar-functions --target wasm32-unknown-unknown --release ''; dockerBuild = arch: pkgs.writeScriptBin "docker-${arch}" '' + #!${pkgs.stdenv.shell} docker buildx build 
--file docker/Dockerfile --platform=linux/${arch} -t homestar-runtime --progress=plain . ''; xFunc = cmd: pkgs.writeScriptBin "x-${cmd}" '' + #!${pkgs.stdenv.shell} cargo watch -c -x ${cmd} ''; xFuncAll = cmd: pkgs.writeScriptBin "x-${cmd}-all" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo ${cmd} --all-features" ''; xFuncNoDefault = cmd: pkgs.writeScriptBin "x-${cmd}-0" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo ${cmd} --no-default-features" ''; xFuncPackage = cmd: crate: pkgs.writeScriptBin "x-${cmd}-${crate}" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo ${cmd} -p homestar-${crate} --all-features" ''; xFuncTest = pkgs.writeScriptBin "x-test" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo nextest run --nocapture && cargo test --doc" ''; xFuncTestAll = pkgs.writeScriptBin "x-test-all" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo nextest run --all-features --nocapture \ && cargo test --doc --all-features" ''; xFuncTestNoDefault = pkgs.writeScriptBin "x-test-0" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo nextest run --no-default-features --nocapture \ && cargo test --doc --no-default-features" ''; xFuncTestPackage = crate: pkgs.writeScriptBin "x-test-${crate}" '' + #!${pkgs.stdenv.shell} cargo watch -c -s "cargo nextest run -p homestar-${crate} --all-features \ && cargo test --doc -p homestar-${crate} --all-features" ''; nxTest = pkgs.writeScriptBin "nx-test" '' + #!${pkgs.stdenv.shell} cargo nextest run cargo test --doc ''; nxTestAll = pkgs.writeScriptBin "nx-test-all" '' + #!${pkgs.stdenv.shell} cargo nextest run --all-features --nocapture cargo test --doc --all-features ''; nxTestNoDefault = pkgs.writeScriptBin "nx-test-0" '' + #!${pkgs.stdenv.shell} cargo nextest run --no-default-features --nocapture cargo test --doc --no-default-features ''; wasmTest = pkgs.writeScriptBin "wasm-ex-test" '' + #!${pkgs.stdenv.shell} cargo build -p homestar-functions --features example-test --target wasm32-unknown-unknown --release cp 
target/wasm32-unknown-unknown/release/homestar_functions.wasm homestar-wasm/fixtures/example_test.wasm wasm-tools component new homestar-wasm/fixtures/example_test.wasm -o homestar-wasm/fixtures/example_test_component.wasm ''; wasmAdd = pkgs.writeScriptBin "wasm-ex-add" '' + #!${pkgs.stdenv.shell} cargo build -p homestar-functions --features example-add --target wasm32-unknown-unknown --release cp target/wasm32-unknown-unknown/release/homestar_functions.wasm homestar-wasm/fixtures/example_add.wasm wasm-tools component new homestar-wasm/fixtures/example_add.wasm -o homestar-wasm/fixtures/example_add_component.wasm @@ -159,6 +180,11 @@ wasm-tools print homestar-wasm/fixtures/example_add_component.wasm -o homestar-wasm/fixtures/example_add_component.wat ''; + runIpfs = pkgs.writeScriptBin "run-ipfs" '' + #!${pkgs.stdenv.shell} + ipfs --repo-dir ./.ipfs --offline daemon + ''; + scripts = [ ci db @@ -180,6 +206,7 @@ nxTest nxTestAll nxTestNoDefault + runIpfs wasmTest wasmAdd ]; @@ -199,6 +226,7 @@ pre-commit diesel-cli direnv + kubo self.packages.${system}.irust ] ++ format-pkgs @@ -212,9 +240,23 @@ NIX_PATH = "nixpkgs=" + pkgs.path; RUST_BACKTRACE = 1; - shellHook = '' - [ -e .git/hooks/pre-commit ] || pre-commit install --install-hooks && pre-commit install --hook-type commit-msg - ''; + shellHook = + '' + [ -e .git/hooks/pre-commit ] || pre-commit install --install-hooks && pre-commit install --hook-type commit-msg + + # Setup local Kubo config + if [ ! 
-e ./.ipfs ]; then + ipfs --repo-dir ./.ipfs --offline init + fi + + # Run Kubo / IPFS + echo -e "To run Kubo as a local IPFS node, use the following command:" + echo -e "ipfs --repo-dir ./.ipfs --offline daemon" + '' + # See https://github.com/nextest-rs/nextest/issues/267 + + (pkgs.lib.strings.optionalString pkgs.stdenv.isDarwin '' + export DYLD_FALLBACK_LIBRARY_PATH="$(rustc --print sysroot)/lib" + ''); }; packages.irust = pkgs.rustPlatform.buildRustPackage rec { diff --git a/homestar-core/Cargo.toml b/homestar-core/Cargo.toml index 756f7088..e9f7f137 100644 --- a/homestar-core/Cargo.toml +++ b/homestar-core/Cargo.toml @@ -24,7 +24,7 @@ doctest = true anyhow = { workspace = true } async-recursion = "1.0" byte-unit = { workspace = true } -diesel = { version = "2.0", features = ["sqlite"] } +diesel = { version = "2.1", features = ["sqlite"] } enum-as-inner = { workspace = true } enum-assoc = { workspace = true } futures = { workspace = true } @@ -50,7 +50,7 @@ json = "0.12" [features] default = [] -test_utils = ["once_cell", "proptest"] +test-utils = ["once_cell", "proptest"] [package.metadata.docs.rs] all-features = true diff --git a/homestar-core/fixtures/.gitkeep b/homestar-core/fixtures/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/homestar-core/src/ipld/dag_json.rs b/homestar-core/src/ipld/dag_json.rs index 3e2eff18..0913b1a6 100644 --- a/homestar-core/src/ipld/dag_json.rs +++ b/homestar-core/src/ipld/dag_json.rs @@ -4,7 +4,10 @@ use crate::{workflow::Error, Unit}; use libipld::{codec::Decode, json::DagJsonCodec, prelude::Codec, Ipld}; -use std::io::Cursor; +use std::{ + fs, + io::{Cursor, Write}, +}; /// Trait for serializing and deserializing types to and from JSON. pub trait DagJson @@ -32,7 +35,7 @@ where let from_ipld = Self::try_from(ipld).map_err(|_err| { // re-decode with an unwrap, without a clone, as we know the data is // valid JSON. 
- Error::::UnexpectedIpldTypeError( + Error::::UnexpectedIpldType( Ipld::decode(DagJsonCodec, &mut Cursor::new(data)).unwrap(), ) })?; @@ -44,4 +47,49 @@ where let data = json.as_bytes(); Self::from_json(data) } + + /// Write JSON to file. + fn to_file(&self, filename: String) -> Result<(), Error> { + Ok(fs::File::create(filename)?.write_all(&self.to_json()?)?) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{ + test_utils, + workflow::{config::Resources, instruction::RunInstruction, prf::UcanPrf, Task}, + Workflow, + }; + + #[test] + fn write_json_to_file_and_read() { + let config = Resources::default(); + let (instruction1, instruction2, _) = + test_utils::workflow::related_wasm_instructions::(); + + let task1 = Task::new( + RunInstruction::Expanded(instruction1.clone()), + config.clone().into(), + UcanPrf::default(), + ); + + let task2 = Task::new( + RunInstruction::Expanded(instruction2.clone()), + config.into(), + UcanPrf::default(), + ); + + let workflow = Workflow::new(vec![task1, task2]); + let json = workflow.to_json_string().unwrap(); + workflow + .to_file("./fixtures/test_add.json".to_string()) + .unwrap(); + let read_file = fs::read_to_string("./fixtures/test_add.json").unwrap(); + assert_eq!(json, read_file); + + let workflow_read = Workflow::from_json_string(read_file).unwrap(); + assert_eq!(workflow, workflow_read); + } } diff --git a/homestar-core/src/lib.rs b/homestar-core/src/lib.rs index e3f3c66e..13a92eb0 100644 --- a/homestar-core/src/lib.rs +++ b/homestar-core/src/lib.rs @@ -20,8 +20,8 @@ pub mod consts; pub mod ipld; pub mod macros; -#[cfg(any(test, feature = "test_utils"))] -#[cfg_attr(docsrs, doc(cfg(feature = "test_utils")))] +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub mod test_utils; mod unit; pub mod workflow; diff --git a/homestar-core/src/macros.rs b/homestar-core/src/macros.rs index d05e6f4b..4d344f73 100644 --- a/homestar-core/src/macros.rs +++ 
b/homestar-core/src/macros.rs @@ -19,7 +19,7 @@ /// # /// /// if !has_permission(user, resource) { -/// bail!(workflow::Error::UnknownError); +/// bail!(workflow::Error::Unknown); /// } /// /// # Ok(()) diff --git a/homestar-core/src/test_utils/mod.rs b/homestar-core/src/test_utils/mod.rs index 479873bc..ee96ff7f 100644 --- a/homestar-core/src/test_utils/mod.rs +++ b/homestar-core/src/test_utils/mod.rs @@ -1,13 +1,18 @@ //! Test Utilities. -#[cfg(feature = "test_utils")] +#[cfg(feature = "test-utils")] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub mod cid; -#[cfg(feature = "test_utils")] +#[cfg(feature = "test-utils")] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub mod ports; -#[cfg(feature = "test_utils")] +#[cfg(feature = "test-utils")] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] mod rvg; -#[cfg(feature = "test_utils")] +#[cfg(feature = "test-utils")] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub mod workflow; -#[cfg(feature = "test_utils")] +#[cfg(feature = "test-utils")] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub use rvg::*; diff --git a/homestar-core/src/test_utils/workflow.rs b/homestar-core/src/test_utils/workflow.rs index 3658206a..d03baf25 100644 --- a/homestar-core/src/test_utils/workflow.rs +++ b/homestar-core/src/test_utils/workflow.rs @@ -24,7 +24,7 @@ type NonceBytes = Vec; /// Return a `mocked` `wasm/run` [Instruction]. pub fn wasm_instruction<'a, T>() -> Instruction<'a, T> { - let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); + let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); Instruction::new( @@ -100,7 +100,7 @@ where /// Return a `mocked` `wasm/run` [Instruction], along with it's [Nonce] as bytes. 
pub fn wasm_instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) { - let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); + let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); let nonce = Nonce::generate(); @@ -120,7 +120,7 @@ pub fn wasm_instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) /// Return a `mocked` [Instruction]. pub fn instruction<'a, T>() -> Instruction<'a, T> { - let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); + let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); Instruction::new( @@ -132,7 +132,7 @@ pub fn instruction<'a, T>() -> Instruction<'a, T> { /// Return a `mocked` [Instruction], along with it's [Nonce] as bytes. pub fn instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) { - let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); + let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); let nonce = Nonce::generate(); diff --git a/homestar-core/src/unit.rs b/homestar-core/src/unit.rs index 31694bed..5df47ebe 100644 --- a/homestar-core/src/unit.rs +++ b/homestar-core/src/unit.rs @@ -45,6 +45,6 @@ impl input::Parse for Input { impl From> for InputParseError { fn from(err: Error) -> Self { - InputParseError::WorkflowError(err.into()) + InputParseError::Workflow(err.into()) } } diff --git a/homestar-core/src/workflow.rs b/homestar-core/src/workflow.rs index 2a098c9f..09428277 100644 --- a/homestar-core/src/workflow.rs +++ b/homestar-core/src/workflow.rs @@ -118,7 +118,7 @@ where let map = from_ipld::>(ipld)?; let ipld = map .get(TASKS_KEY) - .ok_or_else(|| 
WorkflowError::::MissingFieldError(TASKS_KEY.to_string()))?; + .ok_or_else(|| WorkflowError::::MissingField(TASKS_KEY.to_string()))?; let tasks = if let Ipld::List(tasks) = ipld { tasks.iter().try_fold(vec![], |mut acc, ipld| { diff --git a/homestar-core/src/workflow/error.rs b/homestar-core/src/workflow/error.rs index 749e2f90..93957bb6 100644 --- a/homestar-core/src/workflow/error.rs +++ b/homestar-core/src/workflow/error.rs @@ -8,6 +8,7 @@ use crate::{ }; use libipld::Ipld; use serde::de::Error as DeError; +use std::io; /// Generic error type for [Workflow] use cases. /// @@ -18,7 +19,7 @@ pub enum Error { /// /// [Cid]: libipld::cid::Cid #[error("failed to encode CID: {0}")] - CidError(#[from] libipld::cid::Error), + CidEncode(#[from] libipld::cid::Error), /// Error thrown when condition or dynamic check is not met. #[error("condition not met: {0}")] ConditionNotMet(String), @@ -29,20 +30,20 @@ pub enum Error { /// /// [DagCborCodec]: libipld::cbor::DagCborCodec #[error("failed to decode/encode DAG-CBOR: {0}")] - DagCborTranslationError(#[from] anyhow::Error), + DagCborTranslation(#[from] anyhow::Error), /// Error converting from [Ipld] structure via [serde]. /// /// Transparently forwards from [libipld::error::SerdeError]'s `source` and /// `Display` methods through to an underlying error. #[error("cannot convert from Ipld structure: {0}")] - FromIpldError(#[from] libipld::error::SerdeError), + FromIpld(#[from] libipld::error::SerdeError), /// Invalid match discriminant or enumeration. #[error("invalid discriminant {0:#?}")] InvalidDiscriminant(T), /// Error related to a missing a field in a structure or key /// in a map. #[error("no {0} field set")] - MissingFieldError(String), + MissingField(String), /// Error during parsing a [Url]. 
/// /// Transparently forwards from [url::ParseError]'s `source` and @@ -50,20 +51,23 @@ pub enum Error { /// /// [Url]: url::Url #[error(transparent)] - ParseResourceError(#[from] url::ParseError), + ParseResource(#[from] url::ParseError), /// Generic unknown error. #[error("unknown error")] - UnknownError, + Unknown, /// Unexpcted [Ipld] type. #[error("unexpected Ipld type: {0:#?}")] - UnexpectedIpldTypeError(Ipld), + UnexpectedIpldType(Ipld), /// Error when attempting to interpret a sequence of [u8] /// as a string. /// /// Transparently forwards from [std::str::Utf8Error]'s `source` and /// `Display` methods through to an underlying error. #[error(transparent)] - Utf8Error(#[from] std::str::Utf8Error), + Utf8(#[from] std::str::Utf8Error), + /// Propagated IO error. + #[error("error writing data to console: {0}")] + Io(#[from] io::Error), } impl Error { @@ -72,7 +76,7 @@ impl Error { /// /// [SerdeError]: libipld::error::SerdeError pub fn unexpected_ipld(ipld: Ipld) -> Self { - Error::UnexpectedIpldTypeError(ipld) + Error::UnexpectedIpldType(ipld) } /// Return an `invalid type` [SerdeError] when not matching an expected @@ -80,7 +84,7 @@ impl Error { /// /// [SerdeError]: libipld::error::SerdeError pub fn not_an_ipld_list() -> Self { - Error::FromIpldError(libipld::error::SerdeError::invalid_type( + Error::FromIpld(libipld::error::SerdeError::invalid_type( serde::de::Unexpected::Seq, &"an Ipld list / sequence", )) @@ -89,13 +93,13 @@ impl Error { impl From> for Error { fn from(_err: Error) -> Self { - Error::UnknownError + Error::Unknown } } impl From> for Error { fn from(_err: Error) -> Error { - Error::UnknownError + Error::Unknown } } @@ -115,10 +119,10 @@ pub enum InputParseError { /// Transparently forwards from [libipld::error::SerdeError]'s `source` and /// `Display` methods through to an underlying error. 
#[error("cannot convert from Ipld structure: {0}")] - FromIpldError(#[from] libipld::error::SerdeError), + FromIpld(#[from] libipld::error::SerdeError), /// Error converting from [Ipld] structure into [Args]. #[error("cannot convert from Ipld structure into arguments: {0:#?}")] - IpldToArgsError(Args), + IpldToArgs(Args), /// Unexpected [Input] in [Task] structure. /// /// [Task]: crate::workflow::Task @@ -128,7 +132,7 @@ pub enum InputParseError { /// /// [Workflow errors]: Error #[error(transparent)] - WorkflowError(#[from] Error), + Workflow(#[from] Error), } impl From for InputParseError { @@ -148,19 +152,19 @@ pub enum ResolveError { /// Transparently forwards from [anyhow::Error]'s `source` and /// `Display` methods through to an underlying error. #[error(transparent)] - RuntimeError(#[from] anyhow::Error), + Runtime(#[from] anyhow::Error), /// Transport error when attempting to resolve [Workflow] [Input]'s [Cid]. /// /// [Cid]: libipld::Cid /// [Workflow]: crate::Workflow #[error("transport error during resolve phase of input Cid: {0}")] - TransportError(String), + Transport(String), /// Unable to resolve a [Cid] within a [Workflow]'s [Input]. 
/// /// [Cid]: libipld::Cid /// [Workflow]: crate::Workflow #[error("error resolving input Cid: {0}")] - UnresolvedCidError(String), + UnresolvedCid(String), } impl From for ResolveError { diff --git a/homestar-core/src/workflow/input/parse.rs b/homestar-core/src/workflow/input/parse.rs index bf27c384..acf189ae 100644 --- a/homestar-core/src/workflow/input/parse.rs +++ b/homestar-core/src/workflow/input/parse.rs @@ -64,7 +64,7 @@ impl From> for Args { /// use libipld::Ipld; /// use url::Url; /// -/// let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); +/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); /// /// let inst = Instruction::unique( diff --git a/homestar-core/src/workflow/instruction.rs b/homestar-core/src/workflow/instruction.rs index b6a9ddeb..f206172d 100644 --- a/homestar-core/src/workflow/instruction.rs +++ b/homestar-core/src/workflow/instruction.rs @@ -132,7 +132,7 @@ where /// use libipld::Ipld; /// use url::Url; /// -/// let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); +/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); /// /// let instr = Instruction::unique( @@ -154,7 +154,7 @@ where /// use libipld::{cid::{multihash::{Code, MultihashDigest}, Cid}, Ipld, Link}; /// use url::Url; -/// let wasm = "bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".to_string(); +/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).expect("IPFS URL"); /// let h = Code::Blake3_256.digest(b"beep boop"); /// let cid = Cid::new_v1(0x55, h); @@ -280,29 +280,27 @@ where let rsc = match map.get(RESOURCE_KEY) { Some(Ipld::Link(cid)) => cid 
.to_string_of_base(Base::Base32Lower) // Cid v1 - .map_err(WorkflowError::::CidError) + .map_err(WorkflowError::::CidEncode) .and_then(|txt| { Url::parse(format!("{}{}", "ipfs://", txt).as_str()) - .map_err(WorkflowError::ParseResourceError) + .map_err(WorkflowError::ParseResource) }), Some(Ipld::String(txt)) => { - Url::parse(txt.as_str()).map_err(WorkflowError::ParseResourceError) + Url::parse(txt.as_str()).map_err(WorkflowError::ParseResource) } - _ => Err(WorkflowError::MissingFieldError(RESOURCE_KEY.to_string())), + _ => Err(WorkflowError::MissingField(RESOURCE_KEY.to_string())), }?; Ok(Self { rsc, op: from_ipld( map.get(OP_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(OP_KEY.to_string()))? + .ok_or_else(|| WorkflowError::::MissingField(OP_KEY.to_string()))? .to_owned(), )?, input: Input::try_from( map.get(INPUT_KEY) - .ok_or_else(|| { - WorkflowError::::MissingFieldError(INPUT_KEY.to_string()) - })? + .ok_or_else(|| WorkflowError::::MissingField(INPUT_KEY.to_string()))? .to_owned(), )?, nnc: Nonce::try_from( @@ -332,7 +330,7 @@ mod test { ( RESOURCE_KEY.into(), Ipld::String( - "ipfs://bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".into() + "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".into() ) ), (OP_KEY.into(), Ipld::String("ipld/fun".to_string())), diff --git a/homestar-core/src/workflow/invocation.rs b/homestar-core/src/workflow/invocation.rs index 78f4865e..299e88bf 100644 --- a/homestar-core/src/workflow/invocation.rs +++ b/homestar-core/src/workflow/invocation.rs @@ -58,7 +58,7 @@ where Ok(Self { task: Task::try_from( map.get(TASK_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(TASK_KEY.to_string()))? + .ok_or_else(|| WorkflowError::::MissingField(TASK_KEY.to_string()))? 
.to_owned(), )?, }) diff --git a/homestar-core/src/workflow/receipt.rs b/homestar-core/src/workflow/receipt.rs index 48a02cba..98cbd933 100644 --- a/homestar-core/src/workflow/receipt.rs +++ b/homestar-core/src/workflow/receipt.rs @@ -137,16 +137,16 @@ impl TryFrom for Receipt { let ran = map .get(RAN_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(RAN_KEY.to_string()))? + .ok_or_else(|| WorkflowError::::MissingField(RAN_KEY.to_string()))? .try_into()?; let out = map .get(OUT_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(OUT_KEY.to_string()))?; + .ok_or_else(|| WorkflowError::::MissingField(OUT_KEY.to_string()))?; let meta = map .get(METADATA_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(METADATA_KEY.to_string()))?; + .ok_or_else(|| WorkflowError::::MissingField(METADATA_KEY.to_string()))?; let issuer = map .get(ISSUER_KEY) @@ -159,7 +159,7 @@ impl TryFrom for Receipt { let prf = map .get(PROOF_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(PROOF_KEY.to_string()))?; + .ok_or_else(|| WorkflowError::::MissingField(PROOF_KEY.to_string()))?; Ok(Receipt { ran, diff --git a/homestar-core/src/workflow/task.rs b/homestar-core/src/workflow/task.rs index dfa30370..172a251a 100644 --- a/homestar-core/src/workflow/task.rs +++ b/homestar-core/src/workflow/task.rs @@ -115,7 +115,7 @@ where Ok(Self { run: RunInstruction::try_from( map.get(RUN_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(RUN_KEY.to_string()))? + .ok_or_else(|| WorkflowError::::MissingField(RUN_KEY.to_string()))? .to_owned(), )?, cause: map @@ -127,11 +127,11 @@ where .and_then(|ipld| ipld.to_owned().try_into().ok()), meta: map .get(METADATA_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(METADATA_KEY.to_string()))? + .ok_or_else(|| WorkflowError::::MissingField(METADATA_KEY.to_string()))? .to_owned(), prf: UcanPrf::try_from( map.get(PROOF_KEY) - .ok_or_else(|| WorkflowError::::MissingFieldError(PROOF_KEY.to_string()))? 
+ .ok_or_else(|| WorkflowError::::MissingField(PROOF_KEY.to_string()))? .to_owned(), )?, }) @@ -190,7 +190,7 @@ mod test { ( "rsc".into(), Ipld::String( - "ipfs://bafkreidztuwoszw2dfnzufjpsjmzj67x574qcdm2autnhnv43o3t4zmh7i".into(), + "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".into(), ), ), ("op".into(), Ipld::String("ipld/fun".to_string())), diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 1ea113fc..d494c420 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -30,22 +30,20 @@ bench = false # https://github.com/DevinR528/cargo-sort/issues/47 anyhow = { workspace = true } async-trait = "0.1" +atomic_refcell = { workspace = true } axum = { version = "0.6", features = ["ws", "headers"], optional = true } -bincode = { version = "2.0.0-rc.3", features = ["serde"] } byte-unit = { workspace = true } +chrono = { workspace = true } clap = { version = "4.3", features = ["derive", "color", "help", "env"] } -concat-in-place = "1.1" config = "0.13" console-subscriber = { version = "0.1", default-features = false, features = [ "parking_lot" ], optional = true } crossbeam = "0.8" -daemonize = "0.5" dagga = "0.2" dashmap = "5.5" -diesel = { version = "2.1", features = ["sqlite", "r2d2", "returning_clauses_for_sqlite_3_35"] } +diesel = { version = "2.1", features = ["sqlite", "r2d2", "returning_clauses_for_sqlite_3_35", "time", "chrono"] } diesel_migrations = "2.1" dotenvy = "0.15" enum-assoc = { workspace = true } -evmap = "10.0" fnv = "1.0" futures = { workspace = true } headers = "0.3" @@ -53,9 +51,11 @@ homestar-core = { version = "0.1", path = "../homestar-core" } homestar-wasm = { version = "0.1", path = "../homestar-wasm" } http = "0.2" http-serde = "1.1" -indexmap = "2.0" +humantime = { workspace = true } +indexmap = { version = "2.0", features = ["serde"] } ipfs-api = { version = "0.17", optional = true } ipfs-api-backend-hyper = { version = "0.6", features = ["with-builder", "with-send-sync"], 
optional = true } +itertools = { workspace = true } libipld = { workspace = true } libp2p = { version = "0.52", default-features = false, features = ["kad", "request-response", "macros", "identify", "ed25519", "secp256k1", "mdns", "gossipsub", "request-response", "tokio", "dns", "tcp", @@ -64,6 +64,8 @@ libsqlite3-sys = { version = "0.26", features = ["bundled"] } miette = { version = "5.10", features = ["fancy"] } openssl = { version = "0.10", features = ["vendored"] } proptest = { version = "1.2", optional = true } +puffin = { version = "0.16", optional = true } +puffin_egui = { version = "0.22.0", optional = true } rand = "0.8" reqwest = { version = "0.11", features = ["json"] } sec1 = { version = "0.7", features = ["pem", "der"] } @@ -73,10 +75,12 @@ serde_ipld_dagcbor = { workspace = true } serde_with = { version = "3.1", features = ["base64"] } stream-cancel = "0.8" strum = { version = "0.25", features = ["derive"] } -tabled = { git = "https://github.com/zhiburt/tabled.git" } +tabled = { version = "0.13", features = ["std"] } tarpc = { version = "0.33", features = ["serde-transport", "serde-transport-bincode", "tcp"] } thiserror = { workspace = true } +time = { workspace = true } tokio = { workspace = true } +tokio-serde = { version = "0.8", features = ["messagepack"] } tokio-util = { version = "0.7", features = ["time"] } tracing = { workspace = true } tracing-appender = "0.2" @@ -84,6 +88,7 @@ tracing-logfmt = "0.3" tracing-subscriber = { version = "0.3", features = ["env-filter", "parking_lot", "registry"] } tryhard = "0.5" url = "2.4" +wnfs-common = "0.1" [target.'cfg(not(windows))'.dependencies] daemonize = "0.5" @@ -91,7 +96,8 @@ daemonize = "0.5" [dev-dependencies] assert_cmd = "2.0" criterion = "0.5" -homestar-core = { version = "0.1", path = "../homestar-core", features = [ "test_utils" ] } +homestar-core = { version = "0.1", path = "../homestar-core", features = [ "test-utils" ] } +homestar_runtime_proc_macro = { path = "src/test_utils/proc_macro", 
package = "homestar-runtime-tests-proc-macro" } json = "0.12" nix = "0.26" once_cell = "1.18" @@ -107,7 +113,8 @@ wait-timeout = "0.2" default = ["ipfs", "websocket-server"] console = ["dep:console-subscriber"] ipfs = ["dep:ipfs-api", "dep:ipfs-api-backend-hyper"] -test_utils = ["dep:proptest"] +profile = ["dep:puffin", "dep:puffin_egui"] +test-utils = ["dep:proptest"] websocket-server = ["dep:axum"] [package.metadata.docs.rs] diff --git a/homestar-runtime/fixtures/test-workflow-add-one.json b/homestar-runtime/fixtures/test-workflow-add-one.json new file mode 100644 index 00000000..42dc847d --- /dev/null +++ b/homestar-runtime/fixtures/test-workflow-add-one.json @@ -0,0 +1,48 @@ +{ + "tasks": [ + { + "cause": null, + "meta": { + "fuel": 18446744073709552000, + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + 1 + ], + "func": "add_one" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4" + } + }, + { + "cause": null, + "meta": { + "fuel": 18446744073709552000, + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "await/ok": { + "/": "bafyrmig5jivpubiljl26w5qc4om2rxbya6h43ljanotrvp2b2opux6gtbe" + } + } + ], + "func": "add_one" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4" + } + } + ] +} diff --git a/homestar-runtime/migrations/2023-06-04-135955_create_workflows/up.sql b/homestar-runtime/migrations/2023-06-04-135955_create_workflows/up.sql index 12b8cc07..8d698a85 100644 --- a/homestar-runtime/migrations/2023-06-04-135955_create_workflows/up.sql +++ b/homestar-runtime/migrations/2023-06-04-135955_create_workflows/up.sql @@ -1,4 +1,7 @@ CREATE TABLE workflows ( - cid TEXT NOT NULL PRIMARY KEY, - num_tasks INTEGER NOT NULL + cid TEXT NOT NULL PRIMARY KEY, + num_tasks INTEGER NOT NULL, + resources BLOB NOT NULL, + created_at TIMESTAMP DEFAULT 
CURRENT_TIMESTAMP NOT NULL, + completed_at TIMESTAMP ); diff --git a/homestar-runtime/src/cli.rs b/homestar-runtime/src/cli.rs index ae92e313..2a00b12f 100644 --- a/homestar-runtime/src/cli.rs +++ b/homestar-runtime/src/cli.rs @@ -1,17 +1,23 @@ //! CLI commands/arguments. -use crate::network::rpc::Client; +use crate::{ + network::rpc::Client, + runner::{file, response}, +}; use anyhow::anyhow; -use clap::Parser; +use clap::{Args, Parser}; use std::{ net::{IpAddr, SocketAddr}, path::PathBuf, str::FromStr, + time::{Duration, SystemTime}, }; +use tarpc::context; mod error; -mod show; pub use error::Error; +pub(crate) mod show; +pub(crate) use show::ConsoleTable; const TMP_DIR: &str = "/tmp"; const HELP_TEMPLATE: &str = "{about} {version} @@ -31,6 +37,26 @@ pub struct Cli { pub command: Command, } +/// General RPC arguments for [Client] commands. +/// +/// [Client]: crate::network::rpc::Client +#[derive(Debug, Clone, Args)] +pub struct RpcArgs { + /// RPC Homestar runtime host to ping. + #[clap( + long = "host", + default_value_t = String::from("::1"), + value_hint = clap::ValueHint::Hostname + )] + host: String, + /// RPC Homestar runtime port to ping. + #[clap(short = 'p', long = "port", default_value_t = 3030)] + port: u16, + /// RPC Homestar runtime port to ping. + #[clap(long = "timeout", default_value = "60s", value_parser = humantime::parse_duration)] + timeout: Duration, +} + /// CLI Argument types. #[derive(Debug, Parser)] pub enum Command { @@ -71,42 +97,24 @@ pub enum Command { daemon_dir: PathBuf, }, /// Stop the Homestar runtime. - Stop { - #[arg( - long = "host", - default_value_t = String::from("::1"), - value_hint = clap::ValueHint::Hostname - )] - /// RPC Homestar runtime host to ping. - host: String, - #[arg(short = 'p', long = "port", default_value_t = 3030)] - /// RPC Homestar runtime port to ping. - port: u16, - }, + Stop(RpcArgs), /// Ping the Homestar runtime. 
- Ping { - #[arg( - long = "host", - default_value_t = String::from("::1"), - value_hint = clap::ValueHint::Hostname - )] - /// RPC Homestar runtime host to ping. - host: String, - #[arg(short = 'p', long = "port", default_value_t = 3030)] - /// RPC Homestar runtime port to ping. - port: u16, - }, - /// Run a workflow, given a workflow json file. + Ping(RpcArgs), + /// Run a workflow, given a workflow file. Run { - /// Path to workflow json file. + /// RPC host / port arguments. + #[clap(flatten)] + args: RpcArgs, #[arg( short='w', long = "workflow", value_hint = clap::ValueHint::FilePath, value_name = "FILE", + value_parser = clap::value_parser!(file::ReadWorkflow), help = "path to workflow file" )] - workflow: PathBuf, + /// Workflow file to run. + workflow: file::ReadWorkflow, }, } @@ -121,7 +129,6 @@ impl Command { } /// Handle CLI commands related to [Client] RPC calls. - #[allow(clippy::unnecessary_wraps)] pub fn handle_rpc_command(&self) -> Result<(), Error> { // Spin up a new tokio runtime on the current thread. 
let rt = tokio::runtime::Builder::new_current_thread() @@ -129,30 +136,47 @@ impl Command { .build()?; match self { - Command::Ping { host, port } => { - let host = IpAddr::from_str(host).map_err(anyhow::Error::new)?; - let addr = SocketAddr::new(host, *port); - let response = rt.block_on(async { - let client = Client::new(addr).await?; + Command::Ping(args) => { + let (client, response) = rt.block_on(async { + let client = args.client().await?; let response = client.ping().await?; - Ok::(response) + Ok::<(Client, String), Error>((client, response)) })?; - show::Ping::table(addr, response).echo()?; + let response = response::Ping::new(client.addr(), response); + response.echo_table()?; Ok(()) } - Command::Stop { host, port } => { - let host = IpAddr::from_str(host).map_err(anyhow::Error::new)?; - let addr = SocketAddr::new(host, *port); - rt.block_on(async { - let client = Client::new(addr).await?; - let _ = client.stop().await?; - Ok::<(), Error>(()) + Command::Stop(args) => rt.block_on(async { + let client = args.client().await?; + client.stop().await??; + Ok(()) + }), + Command::Run { + args, + workflow: workflow_file, + } => { + let response = rt.block_on(async { + let client = args.client().await?; + let response = client.run(workflow_file.to_owned()).await??; + Ok::(response) })?; + response.echo_table()?; Ok(()) } _ => Err(anyhow!("Invalid command {}", self.name()).into()), } } } + +impl RpcArgs { + async fn client(&self) -> Result { + let host = IpAddr::from_str(&self.host).map_err(anyhow::Error::new)?; + let addr = SocketAddr::new(host, self.port); + let mut ctx = context::current(); + ctx.deadline = SystemTime::now() + self.timeout; + let client = Client::new(addr, ctx).await?; + Ok(client) + } +} diff --git a/homestar-runtime/src/cli/error.rs b/homestar-runtime/src/cli/error.rs index c9e154de..742c0828 100644 --- a/homestar-runtime/src/cli/error.rs +++ b/homestar-runtime/src/cli/error.rs @@ -1,5 +1,6 @@ //! Error type for CLI / CLI-interaction. 
+use crate::network::rpc; use miette::{miette, Diagnostic}; use std::io; use tarpc::client::RpcError; @@ -9,22 +10,25 @@ use tarpc::client::RpcError; pub enum Error { /// Generic CLI error. #[error("{error_message}")] - CliError { + Cli { /// Error message. error_message: String, }, - /// Propagated RPC error. + /// Propagated RPC error related to [tarpc::client::RpcError]. #[error(transparent)] - RpcError(#[from] RpcError), + Rpc(#[from] RpcError), + /// Propagated error related to an . + #[error(transparent)] + RpcMessage(#[from] rpc::Error), /// Propagated IO error. #[error("error writing data to console: {0}")] - WriteError(#[from] io::Error), + Io(#[from] io::Error), } impl Error { /// Create a new [Error]. pub fn new(err: miette::ErrReport) -> Self { - Error::CliError { + Error::Cli { error_message: err.to_string(), } } diff --git a/homestar-runtime/src/cli/show.rs b/homestar-runtime/src/cli/show.rs index 89aafa90..c631b539 100644 --- a/homestar-runtime/src/cli/show.rs +++ b/homestar-runtime/src/cli/show.rs @@ -1,23 +1,41 @@ use std::{ + fmt, io::{self, Write}, - net::SocketAddr, }; use tabled::{ settings::{ object::Rows, style::{BorderColor, BorderSpanCorrection}, - themes::Colorization, Alignment, Color, Modify, Panel, Style, }, - Table, Tabled, + Table, }; -const TABLE_TITLE: &str = "homestar(╯°□°)╯"; +/// Panel title for the output table. +pub(crate) const TABLE_TITLE: &str = "homestar(╯°□°)╯"; /// Output response wrapper. +#[derive(Debug, Clone, PartialEq)] pub(crate) struct Output(String); +impl fmt::Display for Output { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0.trim_end()) + } +} + impl Output { + /// Create a new output response. + pub(crate) fn new(table: String) -> Self { + Self(table) + } + + /// Get the inner string as a reference. + #[allow(dead_code)] + pub(crate) fn inner(&self) -> &str { + &self.0 + } + /// Print ouput response to console via [io::stdout]. 
pub(crate) fn echo(&self) -> Result<(), io::Error> { let stdout = io::stdout(); @@ -26,14 +44,14 @@ impl Output { } } -/// Ping response for display. -#[derive(Tabled)] -pub(crate) struct Ping { - address: SocketAddr, - response: String, +/// Trait for console table output responses. +pub(crate) trait ConsoleTable { + fn table(&self) -> Output; + fn echo_table(&self) -> Result<(), io::Error>; } -trait ApplyStyle { +/// Style trait for console table output responses. +pub(crate) trait ApplyStyle { fn default(&mut self) -> Output; } @@ -43,11 +61,6 @@ impl ApplyStyle for Table { .with(Style::modern()) .with(Panel::header(TABLE_TITLE)) .with(Modify::new(Rows::first()).with(Alignment::left())) - .with(Colorization::exact([Color::FG_WHITE], Rows::first())) - .with(Colorization::exact( - [Color::FG_BRIGHT_GREEN], - Rows::single(1), - )) .with(BorderColor::filled(Color::FG_WHITE)) .with(BorderSpanCorrection) .to_string(); @@ -55,10 +68,3 @@ impl ApplyStyle for Table { Output(table) } } - -impl Ping { - /// Display a singleton table of a `ping` response. - pub(crate) fn table(address: SocketAddr, response: String) -> Output { - Table::new(vec![Self { address, response }]).default() - } -} diff --git a/homestar-runtime/src/db.rs b/homestar-runtime/src/db.rs index 40a117a9..eaf377d9 100644 --- a/homestar-runtime/src/db.rs +++ b/homestar-runtime/src/db.rs @@ -1,7 +1,4 @@ -//! Sqlite database integration and setup. - -#[allow(missing_docs, unused_imports)] -pub mod schema; +//! (Default) sqlite database integration and setup. 
use crate::{ settings, @@ -23,6 +20,10 @@ use std::{env, sync::Arc, time::Duration}; use tokio::fs; use tracing::info; +#[allow(missing_docs, unused_imports)] +pub mod schema; +pub(crate) mod utils; + const ENV: &str = "DATABASE_URL"; const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/"); const PRAGMAS: &str = " @@ -63,11 +64,15 @@ impl Db { let byte_unit = byte.get_adjusted_unit(ByteUnit::MB); Ok(byte_unit) } +} +/// Database trait for working with different Sqlite connection pool and +/// connection configurations. +pub trait Database: Send + Sync + Clone { /// Get database url. /// /// Contains a minimal side-effect to set the env if not already set. - pub fn set_url(database_url: Option) -> Option { + fn set_url(database_url: Option) -> Option { database_url.map_or_else( || dotenv().ok().and_then(|_| env::var(ENV).ok()), |url| { @@ -76,11 +81,7 @@ impl Db { }, ) } -} -/// Database trait for working with different Sqlite connection pool and -/// connection configurations. -pub trait Database: Send + Sync + Clone { /// Test a Sqlite connection to the database and run pending migrations. fn setup(url: &str) -> Result { info!("Using database at {:?}", url); @@ -96,57 +97,84 @@ pub trait Database: Send + Sync + Clone { Self: Sized; /// Get a pooled connection for the database. fn conn(&self) -> Result; + + /// Commit a receipt to the database, updating two tables + /// within a transaction. + fn commit_receipt( + workflow_cid: Cid, + receipt: Receipt, + conn: &mut Connection, + ) -> Result { + let receipt = conn.transaction::<_, diesel::result::Error, _>(|conn| { + let returned = Self::store_receipt(receipt, conn)?; + Self::store_workflow_receipt(workflow_cid, returned.cid(), conn)?; + Ok(returned) + })?; + + Ok(receipt) + } + /// Store receipt given a connection to the database pool. /// /// On conflicts, do nothing. 
- fn store_receipt(receipt: Receipt, conn: &mut Connection) -> Result { + fn store_receipt( + receipt: Receipt, + conn: &mut Connection, + ) -> Result { diesel::insert_into(schema::receipts::table) .values(&receipt) .on_conflict(schema::receipts::cid) .do_nothing() .get_result(conn) - .map_err(Into::into) } /// Store receipts given a connection to the Database pool. - fn store_receipts(receipts: Vec, conn: &mut Connection) -> Result { - diesel::insert_into(schema::receipts::table) - .values(&receipts) - .execute(conn) - .map_err(Into::into) + fn store_receipts( + receipts: Vec, + conn: &mut Connection, + ) -> Result { + receipts.iter().try_fold(0, |acc, receipt| { + let res = diesel::insert_into(schema::receipts::table) + .values(receipt) + .on_conflict(schema::receipts::cid) + .do_nothing() + .execute(conn)?; + + Ok::<_, diesel::result::Error>(acc + res) + }) } /// Find receipt for a given [Instruction] [Pointer], which is indexed. /// - /// This *should* always return one receipt, but sometimes it's nicer to - /// work across vecs/arrays. - /// /// [Instruction]: homestar_core::workflow::Instruction - fn find_instructions(pointers: &Vec, conn: &mut Connection) -> Result> { - let found_receipts = schema::receipts::dsl::receipts + fn find_instruction_pointers( + pointers: &Vec, + conn: &mut Connection, + ) -> Result, diesel::result::Error> { + schema::receipts::dsl::receipts .filter(schema::receipts::instruction.eq_any(pointers)) - .load(conn)?; - Ok(found_receipts) + .load(conn) } - /// Find receipt for a given [Instruction] [Pointer], which is indexed. + /// Find receipt for a given [Instruction] [Cid], which is indexed. 
/// /// [Instruction]: homestar_core::workflow::Instruction - fn find_instruction(pointer: Pointer, conn: &mut Connection) -> Result { - let found_receipt = schema::receipts::dsl::receipts - .filter(schema::receipts::instruction.eq(pointer)) - .first(conn)?; - Ok(found_receipt) + fn find_instruction(cid: Cid, conn: &mut Connection) -> Result { + schema::receipts::dsl::receipts + .filter(schema::receipts::instruction.eq(Pointer::new(cid))) + .first(conn) } /// Store localized workflow cid and information, e.g. number of tasks. - fn store_workflow(workflow: workflow::Stored, conn: &mut Connection) -> Result { + fn store_workflow( + workflow: workflow::Stored, + conn: &mut Connection, + ) -> Result { diesel::insert_into(schema::workflows::table) .values(&workflow) .on_conflict(schema::workflows::cid) .do_nothing() - .execute(conn) - .map_err(Into::into) + .get_result(conn) } /// Store workflow [Cid] and [Receipt] [Cid] in the database for inner join. @@ -154,7 +182,7 @@ pub trait Database: Send + Sync + Clone { workflow_cid: Cid, receipt_cid: Cid, conn: &mut Connection, - ) -> Result { + ) -> Result { let value = StoredReceipt::new(Pointer::new(workflow_cid), Pointer::new(receipt_cid)); diesel::insert_into(schema::workflows_receipts::table) .values(&value) @@ -164,7 +192,6 @@ pub trait Database: Send + Sync + Clone { )) .do_nothing() .execute(conn) - .map_err(Into::into) } /// Store series of receipts for a workflow [Cid] in the @@ -177,26 +204,31 @@ pub trait Database: Send + Sync + Clone { workflow_cid: Cid, receipts: &[Cid], conn: &mut Connection, - ) -> Result { + ) -> Result { receipts.iter().try_fold(0, |acc, receipt| { let res = Self::store_workflow_receipt(workflow_cid, *receipt, conn)?; - Ok::<_, anyhow::Error>(acc + res) + Ok::<_, diesel::result::Error>(acc + res) }) } /// Select workflow given a [Cid] to the workflow. 
- fn select_workflow(cid: Cid, conn: &mut Connection) -> Result { - let wf = schema::workflows::dsl::workflows + fn select_workflow( + cid: Cid, + conn: &mut Connection, + ) -> Result { + schema::workflows::dsl::workflows .filter(schema::workflows::cid.eq(Pointer::new(cid))) .select(workflow::Stored::as_select()) - .get_result(conn)?; - Ok(wf) + .get_result(conn) } /// Return workflow information with number of receipts emitted. - fn get_workflow_info(workflow_cid: Cid, conn: &mut Connection) -> Result { - let wf = Self::select_workflow(workflow_cid, conn)?; - let associated_receipts = workflow::StoredReceipt::belonging_to(&wf) + fn get_workflow_info( + workflow_cid: Cid, + conn: &mut Connection, + ) -> Result { + let workflow = Self::select_workflow(workflow_cid, conn)?; + let associated_receipts = workflow::StoredReceipt::belonging_to(&workflow) .select(schema::workflows_receipts::receipt_cid) .load(conn)?; @@ -205,13 +237,25 @@ pub trait Database: Send + Sync + Clone { .map(|pointer: Pointer| pointer.cid()) .collect(); - Ok(workflow::Info::new(workflow_cid, cids, wf.num_tasks as u32)) + Ok(workflow::Info::new( + workflow_cid, + workflow.num_tasks as u32, + cids, + workflow.resources, + )) } } impl Database for Db { fn setup_connection_pool(settings: &settings::Node) -> Result { - let database_url = env::var(ENV)?; + let database_url = env::var(ENV).unwrap_or_else(|_| { + settings + .db + .url + .as_ref() + .map_or_else(|| "homestar.db".to_string(), |url| url.to_string()) + }); + Self::setup(&database_url)?; let manager = r2d2::ConnectionManager::::new(database_url); @@ -255,12 +299,13 @@ where #[cfg(test)] mod test { use super::*; - use crate::{settings::Settings, test_utils}; + use crate::test_utils::db::MemoryDb; - #[tokio::test] - async fn check_pragmas_memory_db() { - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); + #[homestar_runtime_proc_macro::db_async_test] + fn check_pragmas_memory_db() { + let 
settings = TestSettings::load(); + + let db = MemoryDb::setup_connection_pool(settings.node()).unwrap(); let mut conn = db.conn().unwrap(); let journal_mode = diesel::dsl::sql::("PRAGMA journal_mode") diff --git a/homestar-runtime/src/db/schema.rs b/homestar-runtime/src/db/schema.rs index 5c30285c..3d00e310 100644 --- a/homestar-runtime/src/db/schema.rs +++ b/homestar-runtime/src/db/schema.rs @@ -17,6 +17,9 @@ diesel::table! { workflows (cid) { cid -> Text, num_tasks -> Integer, + resources -> Binary, + created_at -> Timestamp, + completed_at -> Nullable, } } diff --git a/homestar-runtime/src/db/utils.rs b/homestar-runtime/src/db/utils.rs new file mode 100644 index 00000000..6dc7c64c --- /dev/null +++ b/homestar-runtime/src/db/utils.rs @@ -0,0 +1,13 @@ +use chrono::NaiveDateTime; + +pub(crate) trait Timestamp { + fn timestamp_from_nanos(&self) -> Option; +} + +impl Timestamp for i64 { + fn timestamp_from_nanos(&self) -> Option { + let nanos = self % 1_000_000_000; + let seconds = (self - nanos) / 1_000_000_000; + NaiveDateTime::from_timestamp_opt(seconds, nanos as u32) + } +} diff --git a/homestar-runtime/src/event_handler/error.rs b/homestar-runtime/src/event_handler/error.rs index 6f55f29c..dfb77ea8 100644 --- a/homestar-runtime/src/event_handler/error.rs +++ b/homestar-runtime/src/event_handler/error.rs @@ -2,10 +2,10 @@ use crate::network::swarm::RequestResponseKey; use anyhow::Result; -use bincode::{Decode, Encode}; +use serde::{Deserialize, Serialize}; /// Error type for messages related to [libp2p::request_response]. -#[derive(thiserror::Error, Debug, Encode, Decode)] +#[derive(thiserror::Error, Debug, Serialize, Deserialize)] pub(crate) enum RequestResponseError { /// Return a timeout error when attempting to retrieve data keyed by [Cid]. /// @@ -24,13 +24,17 @@ pub(crate) enum RequestResponseError { } impl RequestResponseError { - /// Encode the error into a byte vector via [bincode]. + /// Encode the error into a byte vector via [CBOR]. 
+ /// + /// [CBOR]: serde_ipld_dagcbor pub(crate) fn encode(&self) -> Result> { - bincode::encode_to_vec(self, bincode::config::standard()).map_err(anyhow::Error::new) + serde_ipld_dagcbor::to_vec(self).map_err(anyhow::Error::new) } - /// Decode the error from a byte vector via [bincode]. + /// Decode the error from a byte vector via [CBOR]. + /// + /// [CBOR]: serde_ipld_dagcbor pub(crate) fn decode(bytes: &[u8]) -> Result<(Self, usize)> { - bincode::decode_from_slice(bytes, bincode::config::standard()).map_err(anyhow::Error::new) + serde_ipld_dagcbor::from_slice(bytes).map_err(anyhow::Error::new) } } diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index bd60d617..2cd088a3 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -4,7 +4,7 @@ use super::EventHandler; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ - db::{Connection, Database, Db}, + db::Database, event_handler::{Handler, P2PSender}, network::{ pubsub, @@ -15,12 +15,14 @@ use crate::{ use anyhow::{anyhow, Result}; use async_trait::async_trait; use homestar_core::workflow::Receipt as InvocationReceipt; -use libipld::Cid; +use libipld::{Cid, Ipld}; use libp2p::{ kad::{record::Key, Quorum, Record}, PeerId, }; use std::{num::NonZeroUsize, sync::Arc}; +#[cfg(feature = "ipfs")] +use tokio::runtime::Handle; use tokio::sync::oneshot; use tracing::{error, info}; @@ -59,7 +61,7 @@ pub struct PeerRequest { pub(crate) sender: P2PSender, } -/// Internal events to capture. +/// Events to capture. #[derive(Debug)] pub enum Event { /// [Receipt] captured event. 
@@ -86,8 +88,7 @@ impl Event { { match self { Event::CapturedReceipt(captured) => { - let mut conn = event_handler.db.conn()?; - let (cid, _bytes) = captured.store(event_handler, &mut conn)?; + let (cid, _receipt) = captured.store(event_handler)?; info!( cid = cid.to_string(), "record replicated with quorum {}", event_handler.receipt_quorum @@ -134,8 +135,7 @@ impl Captured { fn store( mut self, event_handler: &mut EventHandler, - conn: &mut Connection, - ) -> Result<(Cid, Vec)> + ) -> Result<(Cid, InvocationReceipt)> where DB: Database, { @@ -150,9 +150,9 @@ impl Captured { "message {msg_id} published on {} for receipt with cid: {receipt_cid}", pubsub::RECEIPTS_TOPIC ), - Err(err) => { + Err(_err) => { error!( - error=?err, "message not published on {} for receipt with cid: {receipt_cid}", + "message not published on {} for receipt with cid: {receipt_cid}", pubsub::RECEIPTS_TOPIC ) } @@ -170,7 +170,7 @@ impl Captured { Quorum::One }; - if let Ok(receipt_bytes) = Receipt::invocation_capsule(invocation_receipt) { + if let Ok(receipt_bytes) = Receipt::invocation_capsule(&invocation_receipt) { let _id = event_handler .swarm .behaviour_mut() @@ -181,8 +181,6 @@ impl Captured { ) .map_err(anyhow::Error::new)?; - // Store workflow_receipt join information. - let _ = Db::store_workflow_receipt(self.workflow.cid, receipt_cid, conn); Arc::make_mut(&mut self.workflow).increment_progress(receipt_cid); let workflow_cid_bytes = self.workflow.cid_as_bytes(); @@ -211,9 +209,7 @@ impl Captured { ) .map_err(anyhow::Error::new)?; - // TODO: Handle Workflow Complete / Num of Tasks finished. 
- - Ok((receipt_cid, receipt_bytes.to_vec())) + Ok((receipt_cid, invocation_receipt)) } else { Err(anyhow!("cannot convert receipt {receipt_cid} to bytes")) } @@ -300,19 +296,15 @@ where } #[cfg(feature = "ipfs")] + #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] async fn handle_event(self, event_handler: &mut EventHandler, ipfs: IpfsCli) { match self { Event::CapturedReceipt(captured) => { - if let Err(err) = event_handler.db.conn().map(|mut conn| { - captured.store(event_handler, &mut conn).map(|(cid, bytes)| { - info!( - cid = cid.to_string(), - "record replicated with quorum {}", event_handler.receipt_quorum - ); - + let _ = captured.store(event_handler).map(|(cid, receipt)| { // Spawn client call in background, without awaiting. - tokio::spawn(async move { - match ipfs.put_receipt_bytes(bytes.to_vec()).await { + let handle = Handle::current(); + handle.spawn(async move { + match ipfs.put_receipt(receipt).await { Ok(put_cid) => { info!(cid = put_cid, "IPLD DAG node stored"); @@ -324,10 +316,7 @@ where } } }); - }) - }) { - error!(error=?err, "error storing event") - } + }); } event => { if let Err(err) = event.handle_info(event_handler).await { diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 0b69d64d..37302f63 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -4,7 +4,7 @@ use super::EventHandler; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ - db::Database, + db::{Connection, Database}, event_handler::{ channel::BoundedChannel, event::{PeerRequest, QueryRecord}, @@ -16,7 +16,7 @@ use crate::{ workflow::WORKFLOW_TAG, Db, Receipt, }; -use anyhow::{anyhow, bail, Result}; +use anyhow::{anyhow, Result}; use async_trait::async_trait; use homestar_core::{ consts, @@ -77,6 +77,7 @@ where DB: Database + Sync, { #[cfg(feature = "ipfs")] + #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] async fn 
handle_event(self, event_handler: &mut EventHandler, _ipfs: IpfsCli) { handle_swarm_event(self, event_handler).await } @@ -171,69 +172,60 @@ async fn handle_swarm_event( QueryResult::GetRecord(Ok(_)) => {} QueryResult::GetRecord(Err(err)) => { error!(err=?err, "error retrieving record"); + // Upon an error, attempt to find the record on the DHT via // a provider if it's a Workflow/Info one. - match event_handler.query_senders.remove(&id) { - Some(( - RequestResponseKey { - cid: cid_str, - capsule_tag: CapsuleTag::Workflow, - }, - sender, - )) => { - let (tx, rx) = BoundedChannel::oneshot(); - if let Ok(cid) = Cid::try_from(cid_str.as_str()) { - if let Err(err) = - event_handler.sender().try_send(Event::GetProviders( - QueryRecord::with(cid, CapsuleTag::Workflow, tx), - )) - { - error!(err=?err, "error opening channel to get providers"); - let _ = sender.try_send(ResponseEvent::Found(Err(err.into()))); - return; - } - match rx.recv_deadline( - Instant::now() + event_handler.p2p_provider_timeout, - ) { - Ok(ResponseEvent::Providers(Ok(providers))) => { - for peer in providers { - let request = RequestResponseKey::new( - cid_str.to_string(), - CapsuleTag::Workflow, - ); - let (tx, _rx) = BoundedChannel::oneshot(); - if let Err(err) = event_handler.sender().try_send( - Event::OutboundRequest(PeerRequest::with( - peer, request, tx, - )), - ) { - error!(err=?err, "error sending outbound request"); - let _ = sender.try_send(ResponseEvent::Found(Err( - err.into(), - ))); - } + if let Some(( + RequestResponseKey { + cid: cid_str, + capsule_tag: CapsuleTag::Workflow, + }, + sender, + )) = event_handler.query_senders.remove(&id) + { + let (tx, rx) = BoundedChannel::oneshot(); + if let Ok(cid) = Cid::try_from(cid_str.as_str()) { + if let Err(err) = event_handler.sender().try_send(Event::GetProviders( + QueryRecord::with(cid, CapsuleTag::Workflow, tx), + )) { + error!(err = ?err, "error opening channel to get providers"); + let _ = 
sender.try_send(ResponseEvent::Found(Err(err.into()))); + return; + } + + match rx + .recv_deadline(Instant::now() + event_handler.p2p_provider_timeout) + { + Ok(ResponseEvent::Providers(Ok(providers))) => { + for peer in providers { + let request = RequestResponseKey::new( + cid_str.to_string(), + CapsuleTag::Workflow, + ); + let (tx, _rx) = BoundedChannel::oneshot(); + if let Err(err) = + event_handler.sender().try_send(Event::OutboundRequest( + PeerRequest::with(peer, request, tx), + )) + { + error!(err = ?err, "error sending outbound request"); + let _ = sender + .try_send(ResponseEvent::Found(Err(err.into()))); } } - _ => { - let _ = - sender.try_send(ResponseEvent::Found(Err(err.into()))); - } + } + _ => { + let _ = sender.try_send(ResponseEvent::Found(Err(err.into()))); } } } - Some(( - RequestResponseKey { - cid: _, - capsule_tag, - }, - sender, - )) => { - let _ = sender.try_send(ResponseEvent::Found(Err(anyhow!( - "not a valid provider record tag: {capsule_tag}" - )))); - } - None => (), + } else if let Some((RequestResponseKey { capsule_tag, .. }, sender)) = + event_handler.query_senders.remove(&id) + { + let _ = sender.try_send(ResponseEvent::Found(Err(anyhow!( + "not a valid provider record tag: {capsule_tag}" + )))); } } QueryResult::PutRecord(Ok(PutRecordOk { key })) => { @@ -274,8 +266,8 @@ async fn handle_swarm_event( cid, event_handler.p2p_provider_timeout, event_handler.sender.clone(), - event_handler.db.conn().as_mut().ok(), - |cid, _| bail!("timeout retrieving workflow info for {}", cid), + event_handler.db.conn().ok(), + None::) -> Result>, ) .await { @@ -371,7 +363,7 @@ async fn handle_swarm_event( SwarmEvent::NewListenAddr { address, .. 
} => { let local_peer = *event_handler.swarm.local_peer_id(); info!( - "local node is listening on {:?}", + "local node is listening on {}", address.with(Protocol::P2p(local_peer)) ); } @@ -436,7 +428,7 @@ mod test { fn found_receipt_record() { let (invocation_receipt, receipt) = test_utils::receipt::receipts(); let instruction_bytes = receipt.instruction_cid_as_bytes(); - let bytes = Receipt::invocation_capsule(invocation_receipt).unwrap(); + let bytes = Receipt::invocation_capsule(&invocation_receipt).unwrap(); let record = Record::new(instruction_bytes, bytes); let peer_record = PeerRecord { record, diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 42938158..7af6ec55 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -37,9 +37,10 @@ pub use logger::*; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; pub use runner::Runner; pub use settings::Settings; +pub(crate) use worker::Worker; pub use workflow::WORKFLOW_TAG; /// Test utilities. 
-#[cfg(any(test, feature = "test_utils"))] -#[cfg_attr(docsrs, doc(cfg(feature = "test_utils")))] +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] pub mod test_utils; diff --git a/homestar-runtime/src/main.rs b/homestar-runtime/src/main.rs index 599c96ce..b30559b2 100644 --- a/homestar-runtime/src/main.rs +++ b/homestar-runtime/src/main.rs @@ -6,7 +6,6 @@ use homestar_runtime::{ Db, FileLogger, Logger, Runner, Settings, }; use miette::Result; -use std::sync::Arc; use tracing::info; fn main() -> Result<()> { @@ -38,11 +37,7 @@ fn main() -> Result<()> { let db = Db::setup_connection_pool(settings.node()).expect("Failed to setup DB pool"); info!("starting Homestar runtime..."); - let settings = Arc::new(settings); - let runner = Runner::start(settings.clone(), db).expect("Failed to start server"); - runner - .serve(settings) - .expect("Failed to run server runtime"); + Runner::start(settings, db).expect("Failed to start runtime") } cmd => cmd.handle_rpc_command()?, } diff --git a/homestar-runtime/src/network/ipfs.rs b/homestar-runtime/src/network/ipfs.rs index ff6aae79..bf4c41a6 100644 --- a/homestar-runtime/src/network/ipfs.rs +++ b/homestar-runtime/src/network/ipfs.rs @@ -1,9 +1,6 @@ //! Ipfs Client container for an [Arc]'ed [IpfsClient]. -#[cfg(test)] -use crate::tasks::{FileLoad, WasmContext}; use anyhow::Result; -#[cfg(not(test))] use futures::TryStreamExt; use homestar_core::workflow::Receipt; use ipfs_api::{ @@ -12,8 +9,6 @@ use ipfs_api::{ IpfsApi, IpfsClient, }; use libipld::{Cid, Ipld}; -#[cfg(test)] -use std::path::PathBuf; use std::{io::Cursor, sync::Arc}; use url::Url; @@ -37,13 +32,14 @@ impl Default for IpfsCli { impl IpfsCli { /// Retrieve content from a IPFS [Url]. + #[allow(dead_code)] pub(crate) async fn get_resource(&self, url: &Url) -> Result> { let cid = Cid::try_from(url.to_string())?; self.get_cid(cid).await } /// Retrieve content from a [Cid]. 
- #[cfg(not(test))] + #[allow(dead_code)] pub(crate) async fn get_cid(&self, cid: Cid) -> Result> { self.0 .cat(&cid.to_string()) @@ -53,16 +49,6 @@ impl IpfsCli { .map_err(Into::into) } - /// Load known content from a [Cid]. - #[cfg(test)] - pub(crate) async fn get_cid(&self, _cid: Cid) -> Result> { - let path = PathBuf::from(format!( - "{}/../homestar-wasm/fixtures/example_test.wasm", - env!("CARGO_MANIFEST_DIR") - )); - WasmContext::load(path).await - } - /// Put/Write [Receipt] into IPFS. #[allow(dead_code)] pub(crate) async fn put_receipt(&self, receipt: Receipt) -> Result { diff --git a/homestar-runtime/src/network/mod.rs b/homestar-runtime/src/network/mod.rs index d87c42d8..f1ff95e9 100644 --- a/homestar-runtime/src/network/mod.rs +++ b/homestar-runtime/src/network/mod.rs @@ -10,7 +10,7 @@ pub(crate) mod pubsub; pub mod rpc; pub(crate) mod swarm; #[cfg(feature = "websocket-server")] -pub(crate) mod ws; +pub mod ws; #[cfg(feature = "ipfs")] pub(crate) use ipfs::IpfsCli; diff --git a/homestar-runtime/src/network/rpc.rs b/homestar-runtime/src/network/rpc.rs index e5f70da7..d52ec2a7 100644 --- a/homestar-runtime/src/network/rpc.rs +++ b/homestar-runtime/src/network/rpc.rs @@ -2,24 +2,31 @@ use crate::{ channel::{BoundedChannel, BoundedChannelReceiver, BoundedChannelSender}, + runner::{self, file::ReadWorkflow, response}, settings, }; use futures::{future, StreamExt}; -use std::{io, net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{io, net::SocketAddr, sync::Arc, time::Duration}; use stream_cancel::Valved; use tarpc::{ client::{self, RpcError}, context, server::{self, incoming::Incoming, Channel}, - tokio_serde::formats::Bincode, }; use tokio::{ runtime::Handle, select, sync::{mpsc, oneshot}, + time, }; +use tokio_serde::formats::MessagePack; use tracing::{info, warn}; +mod error; +pub use error::Error; + +type RunnerSender = Arc>)>>; + /// Message type for messages sent back from the /// websocket server to the [runner] for example. 
/// @@ -34,17 +41,21 @@ pub(crate) enum ServerMessage { /// /// [Runner]: crate::Runner GracefulShutdown(oneshot::Sender<()>), + Run(ReadWorkflow), + RunAck(response::AckWorkflow), + RunErr(runner::Error), + Skip, } /// RPC interface definition for CLI-server interaction. #[tarpc::service] pub(crate) trait Interface { /// Returns a greeting for name. - async fn run(workflow_file: PathBuf); + async fn run(workflow_file: ReadWorkflow) -> Result; /// Ping the server. async fn ping() -> String; /// Stop the server. - async fn stop() -> Result<(), String>; + async fn stop() -> Result<(), Error>; } /// RPC server state information. @@ -59,50 +70,87 @@ pub(crate) struct Server { /// Sender for messages to be sent to the [Runner]. /// /// [Runner]: crate::Runner - pub(crate) runner_sender: Arc>, + pub(crate) runner_sender: RunnerSender, + /// Maximum number of connections to the RPC server. pub(crate) max_connections: usize, + /// Timeout for the RPC server. + pub(crate) timeout: Duration, } /// RPC client wrapper. #[derive(Debug, Clone)] -pub struct Client(InterfaceClient); +pub struct Client { + cli: InterfaceClient, + addr: SocketAddr, + ctx: context::Context, +} /// RPC server state information. 
#[derive(Debug, Clone)] #[allow(dead_code)] struct ServerHandler { addr: SocketAddr, - runner_sender: Arc>, + runner_sender: RunnerSender, + timeout: Duration, } impl ServerHandler { - fn new(addr: SocketAddr, runner_sender: Arc>) -> Self { + fn new(addr: SocketAddr, runner_sender: RunnerSender, timeout: Duration) -> Self { Self { addr, runner_sender, + timeout, } } } #[tarpc::server] impl Interface for ServerHandler { - async fn run(self, _: context::Context, _workflow_file: PathBuf) {} + async fn run( + self, + _: context::Context, + workflow_file: ReadWorkflow, + ) -> Result { + let (tx, rx) = oneshot::channel(); + self.runner_sender + .send((ServerMessage::Run(workflow_file), Some(tx))) + .await + .map_err(|e| Error::FailureToSendOnChannel(e.to_string()))?; + + let now = time::Instant::now(); + select! { + Ok(msg) = rx => { + match msg { + ServerMessage::RunAck(response) => { + Ok(response) + } + ServerMessage::RunErr(err) => Err(err).map_err(|e| Error::FromRunner(e.to_string()))?, + _ => Err(Error::FailureToSendOnChannel("unexpected message".into())), + } + }, + _ = time::sleep_until(now + self.timeout) => { + let s = format!("server timeout of {} ms reached", self.timeout.as_millis()); + info!("{s}"); + Err(Error::FailureToReceiveOnChannel(s)) + } + + } + } async fn ping(self, _: context::Context) -> String { "pong".into() } - async fn stop(self, _: context::Context) -> Result<(), String> { - let _ = self.runner_sender.send(ServerMessage::ShutdownCmd).await; - Ok(()) + async fn stop(self, _: context::Context) -> Result<(), Error> { + self.runner_sender + .send((ServerMessage::ShutdownCmd, None)) + .await + .map_err(|e| Error::FailureToSendOnChannel(e.to_string())) } } impl Server { /// Create a new instance of the RPC server. 
- pub(crate) fn new( - settings: settings::Network, - runner_sender: Arc>, - ) -> Self { + pub(crate) fn new(settings: &settings::Network, runner_sender: RunnerSender) -> Self { let (tx, rx) = BoundedChannel::oneshot(); Self { addr: SocketAddr::new(settings.rpc_host, settings.rpc_port), @@ -110,6 +158,7 @@ impl Server { receiver: rx, runner_sender, max_connections: settings.rpc_max_connections, + timeout: settings.rpc_server_timeout, } } @@ -119,8 +168,9 @@ impl Server { } /// Start the RPC server and connect the client. - pub(crate) async fn spawn(self, runtime_handle: Handle) -> anyhow::Result<()> { - let mut listener = tarpc::serde_transport::tcp::listen(self.addr, Bincode::default).await?; + pub(crate) async fn spawn(self) -> anyhow::Result<()> { + let mut listener = + tarpc::serde_transport::tcp::listen(self.addr, MessagePack::default).await?; listener.config_mut().max_frame_length(usize::MAX); info!("RPC server listening on {}", self.addr); @@ -128,6 +178,7 @@ impl Server { // setup valved listener for cancellation let (exit, incoming) = Valved::new(listener); + let runtime_handle = Handle::current(); runtime_handle.spawn(async move { let fut = incoming // Ignore accept errors. @@ -136,7 +187,8 @@ impl Server { // Limit channels to 1 per IP. .max_channels_per_key(1, |t| t.transport().peer_addr().unwrap_or(self.addr).ip()) .map(|channel| { - let handler = ServerHandler::new(self.addr, self.runner_sender.clone()); + let handler = + ServerHandler::new(self.addr, self.runner_sender.clone(), self.timeout); channel.execute(handler.serve()) }) .buffer_unordered(self.max_connections) @@ -163,19 +215,38 @@ impl Client { /// runner/server. 
/// /// [tcp]: tarpc::serde_transport::tcp - pub async fn new(addr: SocketAddr) -> Result { - let transport = tarpc::serde_transport::tcp::connect(addr, Bincode::default).await?; + pub async fn new(addr: SocketAddr, ctx: context::Context) -> Result { + let transport = tarpc::serde_transport::tcp::connect(addr, MessagePack::default).await?; let client = InterfaceClient::new(client::Config::default(), transport).spawn(); - Ok(Client(client)) + Ok(Client { + cli: client, + addr, + ctx, + }) + } + + /// Return the [SocketAddr] of the RPC server. + pub fn addr(&self) -> SocketAddr { + self.addr } /// Ping the server. pub async fn ping(&self) -> Result { - self.0.ping(context::current()).await + self.cli.ping(self.ctx).await } /// Stop the server. - pub async fn stop(&self) -> Result, RpcError> { - self.0.stop(context::current()).await + pub async fn stop(&self) -> Result, RpcError> { + self.cli.stop(self.ctx).await + } + + /// Run a [Workflow]. + /// + /// [Workflow]: homestar_core::Workflow + pub async fn run( + &self, + workflow_file: ReadWorkflow, + ) -> Result, RpcError> { + self.cli.run(self.ctx, workflow_file).await } } diff --git a/homestar-runtime/src/network/rpc/error.rs b/homestar-runtime/src/network/rpc/error.rs new file mode 100644 index 00000000..62c5e2b1 --- /dev/null +++ b/homestar-runtime/src/network/rpc/error.rs @@ -0,0 +1,19 @@ +//! Error types related to the RPC server / client interface(s). + +use serde::{Deserialize, Serialize}; + +/// Error types related to the RPC server interface. +#[derive(thiserror::Error, Debug, Serialize, Deserialize)] +pub enum Error { + /// Error when attempting to send data on a channel. + #[error("{0}")] + FailureToSendOnChannel(String), + /// Error when attempting to receive data on a channel. + #[error("{0}")] + FailureToReceiveOnChannel(String), + /// Error when attempting to run a workflow via the [Runner]. 
+ /// + /// [Runner]: crate::Runner + #[error("runtime error: {0}")] + FromRunner(String), +} diff --git a/homestar-runtime/src/network/swarm.rs b/homestar-runtime/src/network/swarm.rs index ddd7b1ae..d9cbb905 100644 --- a/homestar-runtime/src/network/swarm.rs +++ b/homestar-runtime/src/network/swarm.rs @@ -5,7 +5,6 @@ use crate::{network::pubsub, settings, Receipt, RECEIPT_TAG, WORKFLOW_TAG}; use anyhow::{anyhow, Context, Result}; -use bincode::{Decode, Encode}; use enum_assoc::Assoc; use libp2p::{ core::upgrade, @@ -66,7 +65,7 @@ pub(crate) async fn new(settings: &settings::Node) -> Result &'static str)] #[func(pub(crate) fn capsule_type(s: &str) -> Option)] pub(crate) enum CapsuleTag { diff --git a/homestar-runtime/src/network/ws.rs b/homestar-runtime/src/network/ws.rs index 31c7b688..6300aa14 100644 --- a/homestar-runtime/src/network/ws.rs +++ b/homestar-runtime/src/network/ws.rs @@ -19,11 +19,15 @@ use std::{ str::FromStr, sync::Arc, }; -use tokio::sync::{broadcast, mpsc, oneshot}; +use tokio::{ + runtime::Handle, + select, + sync::{broadcast, mpsc, oneshot}, +}; use tracing::{debug, info}; /// Type alias for websocket sender. -pub(crate) type Sender = Arc>; +pub type Sender = Arc>; /// Message type for messages sent back from the /// websocket server to the [runner] for example. @@ -51,7 +55,7 @@ impl Server { broadcast::channel(capacity) } - pub(crate) fn new(settings: settings::Network) -> Result { + pub(crate) fn new(settings: &settings::Network) -> Result { let (sender, _receiver) = Self::setup_channel(settings.websocket_capacity); let host = IpAddr::from_str(&settings.websocket_host.to_string())?; @@ -147,8 +151,9 @@ async fn handle_socket(mut socket: ws::WebSocket, state: Server) { // By splitting socket we can send and receive at the same time. 
let (mut socket_sender, mut socket_receiver) = socket.split(); let mut subscribed_rx = state.msg_sender.subscribe(); + let handle = Handle::current(); - let mut send_task = tokio::spawn(async move { + let mut send_task = handle.spawn(async move { while let Ok(msg) = subscribed_rx.recv().await { // In any websocket error, break loop. if socket_sender @@ -161,7 +166,7 @@ async fn handle_socket(mut socket: ws::WebSocket, state: Server) { } }); - let mut recv_task = tokio::spawn(async move { + let mut recv_task = handle.spawn(async move { let mut cnt = 0; while let Some(Ok(msg)) = socket_receiver.next().await { cnt += 1; @@ -173,7 +178,7 @@ async fn handle_socket(mut socket: ws::WebSocket, state: Server) { }); // If any one of the tasks exit, abort the other. - tokio::select! { + select! { _ = (&mut send_task) => recv_task.abort(), _ = (&mut recv_task) => send_task.abort(), }; @@ -229,7 +234,7 @@ mod test { #[tokio::test] async fn ws_connect() { let settings = Settings::load().unwrap(); - let state = Server::new(settings.node.network).unwrap(); + let state = Server::new(settings.node().network()).unwrap(); let (_ws_tx, ws_rx) = mpsc::channel(1); tokio::spawn(state.start(ws_rx)); diff --git a/homestar-runtime/src/receipt.rs b/homestar-runtime/src/receipt.rs index 16729db3..0b7a399e 100644 --- a/homestar-runtime/src/receipt.rs +++ b/homestar-runtime/src/receipt.rs @@ -1,6 +1,5 @@ //! Output of an invocation, referenced by its invocation pointer. 
-use crate::db::schema::receipts; use anyhow::anyhow; use diesel::{ backend::Backend, @@ -42,7 +41,7 @@ const PROOF_KEY: &str = "prf"; /// [Invocation]: homestar_core::workflow::Invocation /// [Instruction]: homestar_core::workflow::Instruction #[derive(Debug, Clone, PartialEq, Queryable, Insertable, Identifiable, Selectable)] -#[diesel(primary_key(cid))] +#[diesel(table_name = crate::db::schema::receipts, primary_key(cid))] pub struct Receipt { cid: Pointer, ran: Pointer, @@ -101,9 +100,9 @@ impl Receipt { /// /// [DagCbor]: DagCborCodec pub fn invocation_capsule( - invocation_receipt: InvocationReceipt, + invocation_receipt: &InvocationReceipt, ) -> anyhow::Result> { - let receipt_ipld = Ipld::from(&invocation_receipt); + let receipt_ipld = Ipld::from(invocation_receipt); let capsule = if let Ipld::Map(mut map) = receipt_ipld { map.insert(VERSION_KEY.into(), consts::INVOCATION_VERSION.into()); Ok(Ipld::Map(BTreeMap::from([( @@ -253,31 +252,25 @@ impl TryFrom for Receipt { fn try_from(ipld: Ipld) -> Result { let map = from_ipld::>(ipld)?; - let cid = from_ipld( map.get(CID_KEY) .ok_or_else(|| anyhow!("missing {CID_KEY}"))? .to_owned(), )?; - let ran = map .get(RAN_KEY) .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? .try_into()?; - let instruction = map .get(INSTRUCTION_KEY) .ok_or_else(|| anyhow!("missing {INSTRUCTION_KEY}"))? .try_into()?; - let out = map .get(OUT_KEY) .ok_or_else(|| anyhow!("missing {OUT_KEY}"))?; - let meta = map .get(METADATA_KEY) .ok_or_else(|| anyhow!("missing {METADATA_KEY}"))?; - let issuer = map .get(ISSUER_KEY) .and_then(|ipld| match ipld { @@ -286,11 +279,9 @@ impl TryFrom for Receipt { }) .and_then(|ipld| from_ipld(ipld.to_owned()).ok()) .map(Issuer::new); - let prf = map .get(PROOF_KEY) .ok_or_else(|| anyhow!("missing {PROOF_KEY}"))?; - let version = from_ipld( map.get(VERSION_KEY) .ok_or_else(|| anyhow!("missing {VERSION_KEY}"))? 
@@ -357,9 +348,8 @@ mod test { use super::*; use crate::{ db::{schema, Database}, - receipt::receipts, settings::Settings, - test_utils, + test_utils::{self, db::MemoryDb}, }; use diesel::prelude::*; @@ -371,20 +361,27 @@ mod test { assert_eq!(invocation.meta(), &receipt.meta.0); assert_eq!(invocation.issuer(), &receipt.issuer); assert_eq!(invocation.prf(), &receipt.prf); + assert_eq!(invocation.to_cid().unwrap(), receipt.cid()); let output_bytes = DagCborCodec .encode::(&invocation.out().clone().into()) .unwrap(); assert_eq!(output_bytes, receipt.output_encoded().unwrap()); + + let receipt_from_invocation = + Receipt::try_with(receipt.instruction.clone(), &invocation).unwrap(); + assert_eq!(receipt_from_invocation, receipt); + + let invocation_from_receipt = InvocationReceipt::try_from(receipt).unwrap(); + assert_eq!(invocation_from_receipt, invocation); } #[test] fn receipt_sql_roundtrip() { - let mut conn = - test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap() - .conn() - .unwrap(); + let mut conn = MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) + .unwrap() + .conn() + .unwrap(); let (_, receipt) = test_utils::receipt::receipts(); let rows_inserted = diesel::insert_into(schema::receipts::table) @@ -393,9 +390,7 @@ mod test { .unwrap(); assert_eq!(1, rows_inserted); - - let inserted_receipt = receipts::table.load::(&mut conn).unwrap(); - + let inserted_receipt = schema::receipts::table.load::(&mut conn).unwrap(); assert_eq!(vec![receipt.clone()], inserted_receipt); } diff --git a/homestar-runtime/src/runner.rs b/homestar-runtime/src/runner.rs index 934df874..8e1b30eb 100644 --- a/homestar-runtime/src/runner.rs +++ b/homestar-runtime/src/runner.rs @@ -10,14 +10,17 @@ use crate::{ db::Database, event_handler::{Event, EventHandler}, network::{rpc, swarm}, - Settings, + worker::WorkerMessage, + Settings, Worker, }; -use anyhow::Result; +use anyhow::{anyhow, Context, Result}; +use 
atomic_refcell::AtomicRefCell; use dashmap::DashMap; +use futures::future::poll_fn; use libipld::Cid; #[cfg(not(test))] use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; +use std::{ops::ControlFlow, rc::Rc, sync::Arc, task::Poll}; use tokio::{ runtime, select, signal::unix::{signal, SignalKind}, @@ -25,14 +28,19 @@ use tokio::{ task::{AbortHandle, JoinHandle}, time, }; -use tokio_util::time::DelayQueue; -use tracing::info; +use tokio_util::time::{delay_queue, DelayQueue}; +use tracing::{error, info, warn}; + +mod error; +pub(crate) mod file; +pub(crate) mod response; +pub(crate) use error::Error; #[cfg(not(test))] const HOMESTAR_THREAD: &str = "homestar-runtime"; /// Type alias for a [DashMap] containing running worker [JoinHandle]s. -pub type RunningWorkerSet = DashMap>>; +pub type RunningWorkerSet = DashMap>, delay_queue::Key)>; /// Type alias for a [DashMap] containing running task [AbortHandle]s. pub type RunningTaskSet = DashMap>; @@ -40,11 +48,21 @@ pub type RunningTaskSet = DashMap>; /// Trait for managing a [DashMap] of running task information. pub trait ModifiedSet { /// Append or insert a new [AbortHandle] into the [RunningTaskSet]. 
- fn append_or_insert(&mut self, cid: Cid, handles: Vec); + fn append_or_insert(&self, cid: Cid, handles: Vec); } +type RpcSender = mpsc::Sender<( + rpc::ServerMessage, + Option>, +)>; + +type RpcReceiver = mpsc::Receiver<( + rpc::ServerMessage, + Option>, +)>; + impl ModifiedSet for RunningTaskSet { - fn append_or_insert(&mut self, cid: Cid, mut handles: Vec) { + fn append_or_insert(&self, cid: Cid, mut handles: Vec) { self.entry(cid) .and_modify(|prev_handles| { prev_handles.append(&mut handles); @@ -58,15 +76,17 @@ impl ModifiedSet for RunningTaskSet { /// /// [Workflows]: homestar_core::Workflow #[cfg(feature = "websocket-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "websocket-server")))] #[allow(dead_code)] #[derive(Debug)] pub struct Runner { message_buffer_len: usize, event_sender: Arc>, - expiration_queue: DelayQueue, - running_tasks: RunningTaskSet, + expiration_queue: Rc>>, + running_tasks: Arc, running_workers: RunningWorkerSet, runtime: tokio::runtime::Runtime, + settings: Arc, ws_msg_sender: Arc, ws_mpsc_sender: mpsc::Sender, } @@ -81,26 +101,29 @@ pub struct Runner { pub struct Runner { message_buffer_len: usize, event_sender: Arc>, - expiration_queue: DelayQueue, - running_tasks: RunningTaskSet, + expiration_queue: Rc>>, + running_tasks: Arc, running_workers: RunningWorkerSet, runtime: tokio::runtime::Runtime, + settings: Arc, } impl Runner { /// Setup bounded, MPSC channel for top-level RPC communication. - pub(crate) fn setup_channel( + pub(crate) fn setup_rpc_channel(capacity: usize) -> (RpcSender, RpcReceiver) { + mpsc::channel(capacity) + } + + /// Setup bounded, MPSC channel for top-level Worker communication. + pub(crate) fn setup_worker_channel( capacity: usize, - ) -> ( - mpsc::Sender, - mpsc::Receiver, - ) { + ) -> (mpsc::Sender, mpsc::Receiver) { mpsc::channel(capacity) } /// Initialize and start the Homestar [Runner] / runtime. 
#[cfg(not(test))] - pub fn start(settings: Arc, db: impl Database + 'static) -> Result { + pub fn start(settings: Settings, db: impl Database + 'static) -> Result<()> { let runtime = runtime::Builder::new_multi_thread() .enable_all() .thread_name_fn(|| { @@ -110,61 +133,146 @@ impl Runner { }) .build()?; - let runner = Self::init(settings, db, runtime)?; - - Ok(runner) + Self::init(settings, db.clone(), runtime)?.serve(db) } /// Initialize and start the Homestar [Runner] / runtime. #[cfg(test)] - pub fn start(settings: Arc, db: impl Database + 'static) -> Result { + pub fn start(settings: Settings, db: impl Database + 'static) -> Result { let runtime = runtime::Builder::new_current_thread() .enable_all() .build()?; let runner = Self::init(settings, db, runtime)?; - Ok(runner) } + fn init( + settings: Settings, + db: impl Database + 'static, + runtime: tokio::runtime::Runtime, + ) -> Result { + let swarm = runtime.block_on(swarm::new(settings.node()))?; + let event_handler = EventHandler::new(swarm, db, settings.node()); + let event_sender = event_handler.sender(); + + #[cfg(feature = "ipfs")] + let _event_handler_hdl = runtime.spawn({ + let ipfs = IpfsCli::default(); + event_handler.start(ipfs) + }); + + #[cfg(not(feature = "ipfs"))] + let _event_handler_hdl = runtime.spawn(event_handler.start()); + + #[cfg(feature = "websocket-server")] + { + // Setup websocket communication. 
+ let ws_server = ws::Server::new(settings.node().network())?; + let ws_msg_tx = ws_server.sender(); + + let (ws_tx, ws_rx) = mpsc::channel(settings.node.network.websocket_capacity); + let _ws_hdl = runtime.spawn(ws_server.start(ws_rx)); + + Ok(Self { + message_buffer_len: settings.node.network.events_buffer_len, + event_sender, + expiration_queue: Rc::new(AtomicRefCell::new(DelayQueue::new())), + running_tasks: DashMap::new().into(), + running_workers: DashMap::new(), + runtime, + settings: settings.into(), + ws_msg_sender: ws_msg_tx, + ws_mpsc_sender: ws_tx, + }) + } + + #[cfg(not(feature = "websocket-server"))] + Ok(Self { + message_buffer_len: settings.node.network.events_buffer_len, + event_sender, + expiration_queue: Rc::new(AtomicRefCell::new(DelayQueue::new())), + running_tasks: DashMap::new().into(), + running_workers: DashMap::new(), + runtime, + settings: settings.into(), + }) + } + /// Listen loop for [Runner] signals and messages. - pub fn serve(self, settings: Arc) -> Result<()> { - let (tx, mut rx) = Self::setup_channel(self.message_buffer_len); - let shutdown_timeout = settings.node.shutdown_timeout; - let rpc_server = rpc::Server::new(settings.node.network.clone(), tx.into()); + #[allow(dead_code)] + fn serve(self, db: impl Database + 'static) -> Result<()> { + let (rpc_tx, mut rpc_rx) = Self::setup_rpc_channel(self.message_buffer_len); + let (runner_tx, mut runner_rx) = Self::setup_worker_channel(self.message_buffer_len); + + let shutdown_timeout = self.settings.node.shutdown_timeout; + let rpc_server = rpc::Server::new(self.settings.node.network(), rpc_tx.into()); let rpc_sender = rpc_server.sender(); - self.runtime - .block_on(rpc_server.spawn(self.runtime.handle().clone()))?; + self.runtime.block_on(rpc_server.spawn())?; let shutdown_time_left = self.runtime.block_on(async { + let mut gc_interval = tokio::time::interval(self.settings.node.gc_interval); loop { select! { biased; - // Duplicate inner-shutdown code here, as tokio::select! 
- // doesn't allow for either-or patterns like matches. - Some(rpc::ServerMessage::ShutdownCmd) = rx.recv() => { - info!("RPC shutdown signal received, shutting down runner"); - let now = time::Instant::now(); - let drain_timeout = now + shutdown_timeout; - select! { - Ok(()) = self.shutdown(rpc_sender) => { - break now.elapsed(); - }, - _ = time::sleep_until(drain_timeout) => { - info!("shutdown timeout reached, shutting down runner anyway"); - break now.elapsed(); - } - } + // Handle RPC messages. + Some((rpc_message, Some(oneshot_tx))) = rpc_rx.recv() => { + let now = time::Instant::now(); + match self.handle_command_message( + rpc_message, + rpc_sender.clone(), + runner_tx.clone(), + db.clone(), + now + ).await { + Ok(ControlFlow::Break(())) => break now.elapsed(), + Ok(ControlFlow::Continue(rpc::ServerMessage::Skip)) => {}, + Ok(ControlFlow::Continue(msg @ rpc::ServerMessage::RunAck(_))) => { + info!("sending message to rpc server"); + let _ = oneshot_tx.send(msg); + }, + Err(err) => { + error!("error handling rpc message: {}", err); + let _ = oneshot_tx.send(rpc::ServerMessage::RunErr(err.into())); + }, + _ => {} + } + } + // Handle messages from the worker. + Some(msg) = runner_rx.recv() => { + match msg { + WorkerMessage::Dropped(cid) => { + let _ = self.abort_worker(cid); + }, + } + } + // Handle GC interval tick. + _ = gc_interval.tick() => { + let _ = self.gc(); + }, + // Handle expired workflows. + Some(expired) = poll_fn( + |ctx| match self.expiration_queue.try_borrow_mut() { + Ok(mut queue) => queue.poll_expired(ctx), + Err(_) => Poll::Pending, + } + ) => { + info!("worker expired, aborting"); + let _ = self.abort_worker(*expired.get_ref()); }, + // Handle shutdown signal. _ = Self::shutdown_signal() => { info!("gracefully shutting down runner"); let now = time::Instant::now(); let drain_timeout = now + shutdown_timeout; + // Sub-select handling of runner `shutdown`. select! { + // Graceful shutdown. 
Ok(()) = self.shutdown(rpc_sender) => { break now.elapsed(); }, + // Force shutdown upon drain timeout. _ = time::sleep_until(drain_timeout) => { info!("shutdown timeout reached, shutting down runner anyway"); break now.elapsed(); @@ -191,9 +299,15 @@ impl Runner { self.event_sender.clone() } + /// Getter for the [RunningTaskSet], cloned as an [Arc]. + pub fn running_tasks(&self) -> Arc { + self.running_tasks.clone() + } + /// [tokio::sync::broadcast::Sender] for sending messages through the /// webSocket server to subscribers. #[cfg(feature = "websocket-server")] + #[cfg_attr(docsrs, doc(cfg(feature = "websocket-server")))] pub fn ws_msg_sender(&self) -> &ws::Sender { &self.ws_msg_sender } @@ -201,54 +315,71 @@ impl Runner { /// Garbage-collect task [AbortHandle]s in the [RunningTaskSet] and /// workers in the [RunningWorkerSet]. #[allow(dead_code)] - pub(crate) fn gc(&mut self) { + fn gc(&self) -> Result<()> { self.running_tasks.retain(|_cid, handles| { handles.retain(|handle| !handle.is_finished()); !handles.is_empty() }); + let mut expiration_q = self + .expiration_queue + .try_borrow_mut() + .map_err(|e| anyhow!("failed to borrow expiration queue: {e}"))?; + + for worker in self.running_workers.iter_mut() { + let (handle, delay_key) = worker.value(); + if handle.is_finished() { + let _ = expiration_q.try_remove(delay_key); + } + } + self.running_workers - .retain(|_cid, handle| !handle.is_finished()); + .retain(|_cid, (handle, _delay_key)| !handle.is_finished()); + + Ok(()) } - /// Garbage-collect task [AbortHandle]s in the [RunningTaskSet] and a - /// worker's [JoinHandle] in the [RunningWorkerSet] for a specific workflow - /// [Cid], running on a worker. + /// Abort and gc/cleanup all workers and tasks. 
#[allow(dead_code)] - pub(crate) fn gc_worker(&mut self, cid: Cid) { - if let Some(mut handles) = self.running_tasks.get_mut(&cid) { - handles.retain(|handle| !handle.is_finished()); - } - - self.running_tasks - .retain(|_cid, handles| !handles.is_empty()); + fn abort_and_cleanup_workers(&self) -> Result<()> { + self.abort_workers(); + self.cleanup_workers()?; - if let Some(handle) = self.running_workers.get_mut(&cid) { - if handle.is_finished() { - self.running_workers.remove(&cid); - } - } + Ok(()) } /// Abort all workers. #[allow(dead_code)] - pub(crate) fn abort_workers(&mut self) { - self.running_workers - .iter_mut() - .for_each(|handle| handle.abort()); + fn abort_workers(&self) { + self.running_workers.iter_mut().for_each(|data| { + let (handle, _delay_key) = data.value(); + handle.abort() + }); + self.abort_tasks(); } - /// Abort a specific worker given a [Cid]. + /// Cleanup all workers, tasks, and the expiration queue. #[allow(dead_code)] - pub(crate) fn abort_worker(&mut self, cid: Cid) { - if let Some(handle) = self.running_workers.get_mut(&cid) { - handle.abort() - } + fn cleanup_workers(&self) -> Result<()> { + self.running_workers.clear(); + self.expiration_queue + .try_borrow_mut() + .map_err(|e| anyhow!("failed to borrow expiration queue: {e}"))? + .clear(); + self.cleanup_tasks(); + Ok(()) } - /// Abort all tasks running within all workers. + /// Cleanup all tasks in the [RunningTaskSet]. #[allow(dead_code)] - pub(crate) fn abort_tasks(&mut self) { + fn cleanup_tasks(&self) { + self.running_tasks.clear(); + } + + /// Aborts and garbage-collects a set of task [AbortHandle]s running for all + /// workers. + #[allow(dead_code)] + fn abort_tasks(&self) { self.running_tasks.iter_mut().for_each(|handles| { for abort_handle in &*handles { abort_handle.abort(); @@ -256,10 +387,27 @@ impl Runner { }); } - /// Abort a specific worker's tasks given a [Cid]. 
+ /// Aborts and removes a specific worker's [JoinHandle] and + /// set of task [AbortHandle]s given a [Cid]. #[allow(dead_code)] - pub(crate) fn abort_worker_tasks(&mut self, cid: Cid) { - if let Some(handles) = self.running_tasks.get_mut(&cid) { + fn abort_worker(&self, cid: Cid) -> Result<()> { + let mut expiration_q = self + .expiration_queue + .try_borrow_mut() + .map_err(|e| anyhow!("failed to borrow expiration queue: {e}"))?; + + if let Some((cid, (handle, delay_key))) = self.running_workers.remove(&cid) { + let _ = expiration_q.try_remove(&delay_key); + handle.abort(); + self.abort_worker_tasks(cid); + } + + Ok(()) + } + + /// Abort a specific worker's tasks given a [Cid]. + fn abort_worker_tasks(&self, cid: Cid) { + if let Some((_cid, handles)) = self.running_tasks.remove(&cid) { for abort_handle in &*handles { abort_handle.abort(); } @@ -267,6 +415,7 @@ impl Runner { } /// Captures shutdown signals for [Runner]. + #[allow(dead_code)] async fn shutdown_signal() -> Result<()> { let mut sigint = signal(SignalKind::interrupt())?; let mut sigterm = signal(SignalKind::terminate())?; @@ -281,9 +430,9 @@ impl Runner { } /// Sequence for shutting down a [Runner], including: - /// a) event-handler channels, - /// b) Running workers - /// c) [Runner] channels. + /// a) RPC and runner-related channels. + /// b) Event-handler channels. 
+ /// c) Running workers async fn shutdown( &self, rpc_sender: Arc>, @@ -307,88 +456,121 @@ impl Runner { shutdown_receiver.await?; } - // TODO: shutdown workers + // abort all workers + self.abort_workers(); Ok(()) } - fn init( - settings: Arc, + #[allow(dead_code)] + async fn handle_command_message( + &self, + msg: rpc::ServerMessage, + rpc_sender: Arc>, + runner_sender: mpsc::Sender, db: impl Database + 'static, - runtime: tokio::runtime::Runtime, - ) -> Result { - let swarm = runtime.block_on(swarm::new(settings.node()))?; - - let event_handler = EventHandler::new(swarm, db, settings.node()); - let event_sender = event_handler.sender(); - - #[cfg(feature = "ipfs")] - let _event_handler_hdl = runtime.spawn({ - let ipfs = IpfsCli::default(); - event_handler.start(ipfs) - }); - - #[cfg(not(feature = "ipfs"))] - let _event_handler_hdl = runtime.spawn(event_handler.start()); - - #[cfg(feature = "websocket-server")] - { - // Setup websocket communication. - let ws_server = ws::Server::new(settings.node.network.clone())?; - let ws_msg_tx = ws_server.sender(); + now: time::Instant, + ) -> Result> { + info!("received message: {:?}", msg); + match msg { + rpc::ServerMessage::ShutdownCmd => { + info!("RPC shutdown signal received, shutting down runner"); + + let drain_timeout = now + self.settings.node.shutdown_timeout; + select! 
{ + Ok(()) = self.shutdown(rpc_sender) => { + Ok(ControlFlow::Break(())) + }, + _ = time::sleep_until(drain_timeout) => { + info!("shutdown timeout reached, shutting down runner anyway"); + Ok(ControlFlow::Break(())) + } + } + } + rpc::ServerMessage::Run(workflow_file) => { + let (workflow, workflow_settings) = + workflow_file.validate_and_parse().await.with_context(|| { + format!("failed to validate/parse workflow @ path: {workflow_file}",) + })?; + + #[cfg(feature = "ipfs")] + let ipfs = IpfsCli::default(); + + #[cfg(feature = "ipfs")] + let worker = { + Worker::new( + workflow, + workflow_settings, + self.event_sender(), + runner_sender, + db.clone(), + ipfs, + ) + .await? + }; + + #[cfg(not(feature = "ipfs"))] + let worker = Worker::new( + workflow, + workflow_settings, + self.event_sender(), + runner_sender.into(), + db.clone(), + ) + .await?; - let (ws_tx, ws_rx) = mpsc::channel(settings.node.network.websocket_capacity); - let _ws_hdl = runtime.spawn(ws_server.start(ws_rx)); + // Deliberate use of Arc::clone for readability, could just be + // `clone`, as the underlying type is an `Arc`. + let initial_info = Arc::clone(&worker.workflow_info); + let workflow_timeout = worker.workflow_settings.timeout; + let timestamp = worker.workflow_started; - Ok(Self { - message_buffer_len: settings.node.network.events_buffer_len, - event_sender, - expiration_queue: DelayQueue::new(), - running_tasks: DashMap::new(), - running_workers: DashMap::new(), - runtime, - ws_msg_sender: ws_msg_tx, - ws_mpsc_sender: ws_tx, - }) + // Spawn worker, which schedules execution graph and runs it. + info!( + cid = worker.workflow_info.cid.to_string(), + "running workflow with settings: {:#?}", worker.workflow_settings + ); + let handle = self.runtime.spawn(worker.run(self.running_tasks())); + + // Add Cid to expirations timing wheel + let delay_key = self + .expiration_queue + .try_borrow_mut() + .map_err(|e| anyhow!("failed to borrow expiration queue: {e}"))? 
+ .insert(initial_info.cid, workflow_timeout); + + // Insert handle into running workers map + self.running_workers + .insert(initial_info.cid, (handle, delay_key)); + + Ok(ControlFlow::Continue(rpc::ServerMessage::RunAck( + response::AckWorkflow::new(initial_info, timestamp), + ))) + } + msg => { + warn!("received unexpected message: {:?}", msg); + Ok(ControlFlow::Continue(rpc::ServerMessage::Skip)) + } } - - #[cfg(not(feature = "websocket-server"))] - Ok(Self { - message_buffer_len: settings.node.network.events_buffer_len, - event_sender, - expiration_queue: DelayQueue::new(), - running_tasks: DashMap::new(), - running_workers: DashMap::new(), - runtime, - }) } } #[cfg(test)] mod test { use super::*; - use crate::network::rpc::Client; - use homestar_core::test_utils; + use crate::{network::rpc::Client, test_utils::WorkerBuilder}; + use homestar_core::test_utils as core_test_utils; use rand::thread_rng; use std::net::SocketAddr; + use tarpc::context; use tokio::net::TcpStream; - fn setup() -> (Runner, Settings) { - let mut settings = Settings::load().unwrap(); - settings.node.network.websocket_port = test_utils::ports::get_port() as u16; - settings.node.network.rpc_port = test_utils::ports::get_port() as u16; - let db = crate::test_utils::db::MemoryDb::setup_connection_pool(&settings.node).unwrap(); - - let runner = Runner::start(settings.clone().into(), db).unwrap(); - (runner, settings) - } - - #[test] + #[homestar_runtime_proc_macro::runner_test] fn shutdown() { - let (runner, settings) = setup(); + let TestRunner { runner, settings } = TestRunner::start(); - let (tx, _rx) = Runner::setup_channel(1); - let rpc_server = rpc::Server::new(settings.node.network.clone(), Arc::new(tx)); + let (tx, _rx) = Runner::setup_rpc_channel(1); + let rpc_server = rpc::Server::new(settings.node.network(), Arc::new(tx)); let rpc_sender = rpc_server.sender(); let addr = SocketAddr::new( @@ -397,10 +579,7 @@ mod test { ); runner.runtime.block_on(async { - rpc_server - 
.spawn(runner.runtime.handle().clone()) - .await - .unwrap(); + rpc_server.spawn().await.unwrap(); let _stream = TcpStream::connect(addr).await.expect("Connection error"); let _another_stream = TcpStream::connect(addr).await.expect("Connection error"); @@ -430,17 +609,14 @@ mod test { }); } - #[test] + #[homestar_runtime_proc_macro::runner_test] fn spawn_rpc_server_and_ping() { - let (runner, settings) = setup(); + let TestRunner { runner, settings } = TestRunner::start(); - let (tx, _rx) = Runner::setup_channel(1); - let rpc_server = rpc::Server::new(settings.node.network.clone(), Arc::new(tx)); + let (tx, _rx) = Runner::setup_rpc_channel(1); + let rpc_server = rpc::Server::new(settings.node.network(), tx.into()); - runner - .runtime - .block_on(rpc_server.spawn(runner.runtime.handle().clone())) - .unwrap(); + runner.runtime.block_on(rpc_server.spawn()).unwrap(); runner.runtime.spawn(async move { let addr = SocketAddr::new( @@ -448,22 +624,137 @@ mod test { settings.node.network.rpc_port, ); - let client = Client::new(addr).await.unwrap(); + let client = Client::new(addr, context::current()).await.unwrap(); let response = client.ping().await.unwrap(); assert_eq!(response, "pong".to_string()); }); } - #[test] + #[homestar_runtime_proc_macro::runner_test] + fn abort_all_workers() { + let TestRunner { runner, settings } = TestRunner::start(); + + runner.runtime.block_on(async { + let worker = WorkerBuilder::new(settings.node).build().await; + let workflow_cid = worker.workflow_info.cid; + let workflow_timeout = worker.workflow_settings.timeout; + let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let delay_key = runner + .expiration_queue + .try_borrow_mut() + .unwrap() + .insert(workflow_cid, workflow_timeout); + runner + .running_workers + .insert(workflow_cid, (handle, delay_key)); + }); + + runner.abort_workers(); + runner.runtime.block_on(async { + for (_, (handle, _)) in runner.running_workers { + assert!(!handle.is_finished()); + 
assert!(handle.await.unwrap_err().is_cancelled()); + } + }); + runner.running_tasks.iter().for_each(|handles| { + for handle in &*handles { + assert!(handle.is_finished()); + } + }); + } + + #[homestar_runtime_proc_macro::runner_test] + fn abort_and_cleanup_all_workers() { + let TestRunner { runner, settings } = TestRunner::start(); + + runner.runtime.block_on(async { + let worker = WorkerBuilder::new(settings.node).build().await; + let workflow_cid = worker.workflow_info.cid; + let workflow_timeout = worker.workflow_settings.timeout; + let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let delay_key = runner + .expiration_queue + .try_borrow_mut() + .unwrap() + .insert(workflow_cid, workflow_timeout); + runner + .running_workers + .insert(workflow_cid, (handle, delay_key)); + }); + + runner.abort_and_cleanup_workers().unwrap(); + assert!(runner.running_workers.is_empty()); + assert!(runner.running_tasks.is_empty()); + } + + #[homestar_runtime_proc_macro::runner_test] + fn gc_while_workers_still_running() { + let TestRunner { runner, settings } = TestRunner::start(); + + runner.runtime.block_on(async { + let worker = WorkerBuilder::new(settings.node).build().await; + let workflow_cid = worker.workflow_info.cid; + let workflow_timeout = worker.workflow_settings.timeout; + let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let delay_key = runner + .expiration_queue + .try_borrow_mut() + .unwrap() + .insert(workflow_cid, workflow_timeout); + + runner + .running_workers + .insert(workflow_cid, (handle, delay_key)); + }); + + runner.gc().unwrap(); + assert!(!runner.running_workers.is_empty()); + + runner.runtime.block_on(async { + for (_, (handle, _)) in runner.running_workers { + assert!(!handle.is_finished()); + let _ = handle.await.unwrap(); + } + }); + + runner.running_tasks.iter().for_each(|handles| { + for handle in &*handles { + assert!(handle.is_finished()); + } + }); + + assert!(!runner.running_tasks.is_empty()); + 
assert!(!runner.expiration_queue.try_borrow_mut().unwrap().is_empty()); + } + + #[homestar_runtime_proc_macro::runner_test] + fn gc_while_workers_finished() { + let TestRunner { runner, settings } = TestRunner::start(); + + runner.runtime.block_on(async { + let worker = WorkerBuilder::new(settings.node).build().await; + let _ = worker.run(runner.running_tasks()).await; + }); + + runner.running_tasks.iter().for_each(|handles| { + for handle in &*handles { + assert!(handle.is_finished()); + } + }); + + runner.gc().unwrap(); + assert!(runner.running_tasks.is_empty()); + } + + #[homestar_runtime_proc_macro::runner_test] fn abort_all_tasks() { - let (mut runner, _) = setup(); + let TestRunner { runner, .. } = TestRunner::start(); let mut set = tokio::task::JoinSet::new(); - runner.runtime.block_on(async { for i in 0..3 { let handle = set.spawn(async move { i }); runner.running_tasks.append_or_insert( - test_utils::cid::generate_cid(&mut thread_rng()), + core_test_utils::cid::generate_cid(&mut thread_rng()), vec![handle], ); } @@ -472,21 +763,19 @@ mod test { }); runner.abort_tasks(); - assert!(!runner.running_tasks.is_empty()); - - runner.gc(); + runner.cleanup_tasks(); assert!(runner.running_tasks.is_empty()); } - #[test] + #[homestar_runtime_proc_macro::runner_test] fn abort_one_task() { - let (mut runner, _) = setup(); + let TestRunner { runner, .. 
} = TestRunner::start(); let mut set = tokio::task::JoinSet::new(); let mut cids = vec![]; runner.runtime.block_on(async { for i in 0..3 { let handle = set.spawn(async move { i }); - let cid = test_utils::cid::generate_cid(&mut thread_rng()); + let cid = core_test_utils::cid::generate_cid(&mut thread_rng()); runner.running_tasks.append_or_insert(cid, vec![handle]); cids.push(cid); } @@ -494,10 +783,8 @@ mod test { while set.join_next().await.is_some() {} }); - runner.abort_worker_tasks(cids[0]); assert!(runner.running_tasks.len() == 3); - - runner.gc_worker(cids[0]); + runner.abort_worker_tasks(cids[0]); assert!(runner.running_tasks.len() == 2); } } diff --git a/homestar-runtime/src/runner/error.rs b/homestar-runtime/src/runner/error.rs new file mode 100644 index 00000000..55740427 --- /dev/null +++ b/homestar-runtime/src/runner/error.rs @@ -0,0 +1,22 @@ +//! Error types related to the Homestar runtime/[Runner]. +//! +//! [Runner]: crate::Runner + +use std::io; + +/// Error types related to running [Workflow]s and other runtime +/// components. +/// +/// [Workflow]: homestar_core::Workflow +#[derive(thiserror::Error, Debug)] +pub(crate) enum Error { + /// Unsupported workflow type. + #[error("unsupported workflow file type: {0}")] + UnsupportedWorkflow(String), + /// Propagated IO error. + #[error("error reading data: {0}")] + Io(#[from] io::Error), + /// Propagated, general runtime error. + #[error(transparent)] + Runtime(#[from] anyhow::Error), +} diff --git a/homestar-runtime/src/runner/file.rs b/homestar-runtime/src/runner/file.rs new file mode 100644 index 00000000..761691f4 --- /dev/null +++ b/homestar-runtime/src/runner/file.rs @@ -0,0 +1,118 @@ +//! File configuration for [Workflow]s. +//! +//! 
[Workflow]: homestar_core::Workflow + +use super::Error; +use crate::workflow; +use homestar_core::{ipld::DagJson, Workflow}; +use homestar_wasm::io::Arg; +use serde::{Deserialize, Serialize}; +use std::{ffi::OsStr, fmt, path::PathBuf, str::FromStr}; +use tokio::fs; + +/// Data structure for a workflow file path. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReadWorkflow { + /// Workflow file to run. + file: PathBuf, +} + +impl FromStr for ReadWorkflow { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(Self { + file: s.parse().map_err(|e| format!("{e}"))?, + }) + } +} + +impl fmt::Display for ReadWorkflow { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.file) + } +} + +impl ReadWorkflow { + /// Validate and parse the workflow file. + /// + /// Validation is currently limited to checking the file extension, + /// or attempting to treat the file as JSON if no extension is provided. + pub(crate) async fn validate_and_parse<'a>( + &self, + ) -> Result<(Workflow<'a, Arg>, workflow::Settings), Error> { + match self.file.extension().and_then(OsStr::to_str) { + None | Some("json") => { + let data = fs::read_to_string(&self.file.canonicalize()?).await?; + // TODO: Parse this from the workflow file + let workflow_settings = workflow::Settings::default(); + Ok(( + DagJson::from_json_string(data).map_err(anyhow::Error::new)?, + workflow_settings, + )) + } + + Some(ext) => Err(Error::UnsupportedWorkflow(ext.to_string())), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use homestar_core::{ + test_utils::workflow as workflow_test_utils, + workflow::{config::Resources, instruction::RunInstruction, prf::UcanPrf, Task}, + }; + + #[tokio::test] + async fn validate_and_parse_workflow() { + let path = PathBuf::from("./fixtures/test.json"); + let config = Resources::default(); + let (instruction1, instruction2, _) = + workflow_test_utils::related_wasm_instructions::(); + + let task1 = Task::new( + 
RunInstruction::Expanded(instruction1.clone()), + config.clone().into(), + UcanPrf::default(), + ); + + let task2 = Task::new( + RunInstruction::Expanded(instruction2.clone()), + config.into(), + UcanPrf::default(), + ); + + let workflow = Workflow::new(vec![task1, task2]); + + workflow.to_file(path.display().to_string()).unwrap(); + let workflow_file = ReadWorkflow { file: path.clone() }; + + let (validated_workflow, _settings) = workflow_file.validate_and_parse().await.unwrap(); + + assert_eq!(workflow, validated_workflow); + + // rename file extension + fs::rename(path, "./fixtures/test.txt").await.unwrap(); + let new_path = PathBuf::from("./fixtures/test.txt"); + let workflow_file = ReadWorkflow { + file: new_path.clone(), + }; + let error = workflow_file.validate_and_parse().await; + assert_eq!( + error.unwrap_err().to_string(), + "unsupported workflow file type: txt" + ); + + // rename to no file extension + fs::rename(new_path, "./fixtures/test_fam").await.unwrap(); + let new_path = PathBuf::from("./fixtures/test_fam"); + let workflow_file = ReadWorkflow { + file: new_path.clone(), + }; + let (newly_validated_workflow, _settings) = + workflow_file.validate_and_parse().await.unwrap(); + assert_eq!(workflow, newly_validated_workflow); + } +} diff --git a/homestar-runtime/src/runner/response.rs b/homestar-runtime/src/runner/response.rs new file mode 100644 index 00000000..7020c076 --- /dev/null +++ b/homestar-runtime/src/runner/response.rs @@ -0,0 +1,101 @@ +//! Responses for display/return to the user for +//! Client requests. + +use crate::{ + cli::show::{self, ApplyStyle}, + workflow::{self, IndexedResources}, +}; +use chrono::NaiveDateTime; +use libipld::Cid; +use serde::{Deserialize, Serialize}; +use std::{fmt, net::SocketAddr, sync::Arc}; +use tabled::{ + col, + settings::{object::Rows, Format, Modify}, + Table, Tabled, +}; + +/// Workflow information specified for response / display upon +/// acknowledgement of running a workflow. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Tabled)] +pub struct AckWorkflow { + pub(crate) cid: Cid, + pub(crate) num_tasks: u32, + #[tabled(skip)] + pub(crate) progress: Vec, + pub(crate) progress_count: u32, + #[tabled(skip)] + pub(crate) resources: IndexedResources, + pub(crate) timestamp: String, +} + +impl fmt::Display for AckWorkflow { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "cid: {}, progress: {}/{}, timestamp: {}", + self.cid, self.progress_count, self.num_tasks, self.timestamp + ) + } +} + +impl AckWorkflow { + /// Workflow information for response / display. + pub(crate) fn new(workflow_info: Arc, timestamp: NaiveDateTime) -> Self { + Self { + cid: workflow_info.cid, + num_tasks: workflow_info.num_tasks, + progress: workflow_info.progress.clone(), + progress_count: workflow_info.progress_count, + resources: workflow_info.resources.clone(), + timestamp: timestamp.format("%Y-%m-%d %H:%M:%S").to_string(), + } + } +} + +impl show::ConsoleTable for AckWorkflow { + fn table(&self) -> show::Output { + show::Output::new(Table::new(vec![self]).to_string()) + } + + fn echo_table(&self) -> Result<(), std::io::Error> { + let table = self.table(); + let mut resource_table = Table::new( + self.resources + .rscs() + .map(|v| v.to_string()) + .collect::>(), + ); + + resource_table + .with(Modify::new(Rows::first()).with(Format::content(|_s| "Resources".to_string()))); + + let tbl = col![table, resource_table].default(); + + tbl.echo() + } +} + +/// Ping response for display. +#[derive(Tabled)] +pub(crate) struct Ping { + address: SocketAddr, + response: String, +} + +impl Ping { + /// Create a new [Ping] response. 
+ pub(crate) fn new(address: SocketAddr, response: String) -> Self { + Self { address, response } + } +} + +impl show::ConsoleTable for Ping { + fn table(&self) -> show::Output { + Table::new(vec![&self]).default() + } + + fn echo_table(&self) -> Result<(), std::io::Error> { + self.table().echo() + } +} diff --git a/homestar-runtime/src/scheduler.rs b/homestar-runtime/src/scheduler.rs index cc0e0c1d..f8b23d8d 100644 --- a/homestar-runtime/src/scheduler.rs +++ b/homestar-runtime/src/scheduler.rs @@ -12,12 +12,12 @@ use crate::{ Event, }, network::swarm::CapsuleTag, - workflow::{self, Builder, Resource, Vertex}, + workflow::{self, Builder, IndexedResources, Resource, Vertex}, Db, }; -use anyhow::Result; +use anyhow::{anyhow, Result}; use dagga::Node; -use futures::future::BoxFuture; +use futures::future::LocalBoxFuture; use homestar_core::{ workflow::{InstructionResult, LinkMap, Pointer}, Workflow, @@ -27,7 +27,7 @@ use indexmap::IndexMap; use libipld::Cid; use std::{ops::ControlFlow, str::FromStr, sync::Arc, time::Instant}; use tokio::sync::{mpsc, RwLock}; -use tracing::debug; +use tracing::info; type Schedule<'a> = Vec, usize>>>; @@ -44,7 +44,7 @@ pub(crate) struct ExecutionGraph<'a> { /// Vector of [resources] to fetch for executing functions in [Workflow]. /// /// [resources]: Resource - pub(crate) resources: Vec, + pub(crate) indexed_resources: IndexedResources, } /// Scheduler for a series of tasks, including what's run, @@ -71,47 +71,70 @@ pub(crate) struct TaskScheduler<'a> { pub(crate) resume_step: Option, /// Resources that tasks within a [Workflow] rely on, retrieved - /// through the IPFS Client, or over HTTP, or thorugh the DHT directly - /// ahead-of-time. + /// over the network, ahead-of-time. /// - /// This is transferred from the [ExecutionGraph] for actually executing the - /// schedule. + /// This is transferred from the [ExecutionGraph] for executing the + /// schedule by a worker. 
pub(crate) resources: IndexMap>, } +/// Scheduler context containing a schedule for executing tasks +/// and a map of [IndexedResources]. +pub(crate) struct SchedulerContext<'a> { + /// Scheduler for a series of tasks, including what's run. + pub(crate) scheduler: TaskScheduler<'a>, + /// Map of instructions => resources, for a [Workflow]. + pub(crate) indexed_resources: IndexedResources, +} + impl<'a> TaskScheduler<'a> { - /// Initialize Task Scheduler, given [Receipt] cache. + /// Initialize Task Scheduler for a [Workflow] + /// + /// The scheduler will attempt to find already-executed tasks (via [Receipts]) + /// either in the database or through a [Swarm]/DHT query on a short(er) + /// timeout. /// - /// [Receipt]: crate::Receipt + /// [Receipts]: crate::Receipt + /// [Swarm]: crate::network::swarm + /// [Workflow]: homestar_core::Workflow + #[allow(unknown_lints, clippy::needless_pass_by_ref_mut)] pub(crate) async fn init( workflow: Workflow<'a, Arg>, + workflow_cid: Cid, settings: Arc, event_sender: Arc>, conn: &mut Connection, fetch_fn: F, - ) -> Result> + ) -> Result> where - F: FnOnce(Vec) -> BoxFuture<'a, Result>>>, + F: FnOnce(Vec) -> LocalBoxFuture<'a, Result>>>, { let builder = Builder::new(workflow); let graph = builder.graph()?; let mut schedule = graph.schedule; let schedule_length = schedule.len(); - let fetched = fetch_fn(graph.resources).await?; - + let mut resources_to_fetch: Vec = vec![]; let resume = schedule .iter() .enumerate() .rev() .try_for_each(|(idx, vec)| { let folded_pointers = vec.iter().try_fold(vec![], |mut ptrs, node| { - ptrs.push(Pointer::new(Cid::from_str(node.name())?)); + let cid = Cid::from_str(node.name())?; + graph + .indexed_resources + .get(&cid) + .map(|resource| { + resources_to_fetch.push(resource.to_owned()); + ptrs.push(Pointer::new(cid)); + }) + .ok_or_else(|| anyhow!("resource not found for instruction {cid}"))?; Ok::<_, anyhow::Error>(ptrs) }); if let Ok(pointers) = folded_pointers { let pointers_len = 
pointers.len(); - match Db::find_instructions(&pointers, conn) { + match Db::find_instruction_pointers(&pointers, conn) { Ok(found) => { let linkmap = found.iter().fold( LinkMap::>::new(), @@ -132,7 +155,7 @@ impl<'a> TaskScheduler<'a> { } } Err(_) => { - debug!("receipt not available in the database"); + info!("receipt not available in the database"); let (tx, rx) = BoundedChannel::with(pointers_len); for ptr in &pointers { let _ = event_sender.try_send(Event::FindRecord( @@ -147,7 +170,11 @@ impl<'a> TaskScheduler<'a> { { if pointers.contains(&Pointer::new(found.cid())) { if let Ok(cid) = found.instruction().try_into() { - let _ = linkmap.insert(cid, found.output_as_arg()); + let stored_receipt = + Db::commit_receipt(workflow_cid, found.clone(), conn) + .unwrap_or(found); + + let _ = linkmap.insert(cid, stored_receipt.output_as_arg()); counter += 1; } } @@ -167,6 +194,8 @@ impl<'a> TaskScheduler<'a> { } }); + let fetched = fetch_fn(resources_to_fetch).await?; + match resume { ControlFlow::Break((idx, linkmap)) => { let pivot = schedule.split_off(idx); @@ -176,20 +205,26 @@ impl<'a> TaskScheduler<'a> { Some(idx) }; - Ok(Self { - linkmap: Arc::new(linkmap.into()), - ran: Some(schedule), - run: pivot, - resume_step: step, - resources: fetched, + Ok(SchedulerContext { + scheduler: Self { + linkmap: Arc::new(linkmap.into()), + ran: Some(schedule), + run: pivot, + resume_step: step, + resources: fetched, + }, + indexed_resources: graph.indexed_resources, }) } - _ => Ok(Self { - linkmap: Arc::new(LinkMap::>::new().into()), - ran: None, - run: schedule, - resume_step: None, - resources: fetched, + _ => Ok(SchedulerContext { + scheduler: Self { + linkmap: Arc::new(LinkMap::>::new().into()), + ran: None, + run: schedule, + resume_step: None, + resources: fetched, + }, + indexed_resources: graph.indexed_resources, }), } } @@ -198,7 +233,11 @@ impl<'a> TaskScheduler<'a> { #[cfg(test)] mod test { use super::*; - use crate::{db::Database, settings::Settings, test_utils, 
workflow as wf, Receipt}; + use crate::{ + db::Database, + test_utils::{self, db::MemoryDb}, + workflow as wf, Receipt, + }; use futures::FutureExt; use homestar_core::{ ipld::DagCbor, @@ -210,34 +249,46 @@ mod test { }; use libipld::Ipld; - #[tokio::test] - async fn initialize_task_scheduler() { + #[homestar_runtime_proc_macro::db_async_test] + fn initialize_task_scheduler() { + let settings = TestSettings::load(); let config = Resources::default(); let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); let task1 = Task::new( - RunInstruction::Expanded(instruction1), + RunInstruction::Expanded(instruction1.clone()), config.clone().into(), UcanPrf::default(), ); let task2 = Task::new( - RunInstruction::Expanded(instruction2), + RunInstruction::Expanded(instruction2.clone()), config.into(), UcanPrf::default(), ); - let settings = Settings::load().unwrap(); - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); + let db = MemoryDb::setup_connection_pool(&settings.node).unwrap(); let mut conn = db.conn().unwrap(); let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); + let workflow_cid = workflow.clone().to_cid().unwrap(); let workflow_settings = wf::Settings::default(); - let fetch_fn = |_rscs: Vec| { async { Ok(IndexMap::default()) } }.boxed(); + let fetch_fn = |_rscs: Vec| { + { + async { + let mut index_map = IndexMap::new(); + index_map.insert(Resource::Url(instruction1.resource().to_owned()), vec![]); + index_map.insert(Resource::Url(instruction2.resource().to_owned()), vec![]); + + Ok(index_map) + } + } + .boxed_local() + }; - let (tx, mut _rx) = test_utils::event::setup_channel(settings); + let (tx, mut _rx) = test_utils::event::setup_event_channel(settings.node); - let scheduler = TaskScheduler::init( + let scheduler_ctx = TaskScheduler::init( workflow, + workflow_cid, workflow_settings.into(), tx.into(), &mut conn, @@ -246,14 +297,17 @@ mod test { .await 
.unwrap(); - assert!(scheduler.linkmap.read().await.is_empty()); - assert!(scheduler.ran.is_none()); - assert_eq!(scheduler.run.len(), 2); - assert_eq!(scheduler.resume_step, None); + let ctx = scheduler_ctx.scheduler; + + assert!(ctx.linkmap.read().await.is_empty()); + assert!(ctx.ran.is_none()); + assert_eq!(ctx.run.len(), 2); + assert_eq!(ctx.resume_step, None); } - #[tokio::test] - async fn initialize_task_scheduler_with_receipted_instruction() { + #[homestar_runtime_proc_macro::db_async_test] + fn initialize_task_scheduler_with_receipted_instruction() { + let settings = TestSettings::load(); let config = Resources::default(); let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); @@ -263,7 +317,7 @@ mod test { UcanPrf::default(), ); let task2 = Task::new( - RunInstruction::Expanded(instruction2), + RunInstruction::Expanded(instruction2.clone()), config.into(), UcanPrf::default(), ); @@ -281,24 +335,33 @@ mod test { ) .unwrap(); - let settings = Settings::load().unwrap(); - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); + let db = MemoryDb::setup_connection_pool(&settings.node).unwrap(); let mut conn = db.conn().unwrap(); - - let stored_receipt = - test_utils::db::MemoryDb::store_receipt(receipt.clone(), &mut conn).unwrap(); + let stored_receipt = MemoryDb::store_receipt(receipt.clone(), &mut conn).unwrap(); assert_eq!(receipt, stored_receipt); let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); + let workflow_cid = workflow.clone().to_cid().unwrap(); let workflow_settings = wf::Settings::default(); - let fetch_fn = |_rscs: Vec| { async { Ok(IndexMap::default()) } }.boxed(); + let fetch_fn = |_rscs: Vec| { + { + async { + let mut index_map = IndexMap::new(); + index_map.insert(Resource::Url(instruction1.resource().to_owned()), vec![]); + index_map.insert(Resource::Url(instruction2.resource().to_owned()), vec![]); + + Ok(index_map) + } + } + .boxed_local() + 
}; - let (tx, mut _rx) = test_utils::event::setup_channel(settings); + let (tx, mut _rx) = test_utils::event::setup_event_channel(settings.node); - let scheduler = TaskScheduler::init( + let scheduler_ctx = TaskScheduler::init( workflow, + workflow_cid, workflow_settings.into(), tx.into(), &mut conn, @@ -307,21 +370,23 @@ mod test { .await .unwrap(); - let ran = scheduler.ran.as_ref().unwrap(); + let ctx = scheduler_ctx.scheduler; + let ran = ctx.ran.as_ref().unwrap(); - assert_eq!(scheduler.linkmap.read().await.len(), 1); - assert!(scheduler + assert_eq!(ctx.linkmap.read().await.len(), 1); + assert!(ctx .linkmap .read() .await - .contains_key(&instruction1.to_cid().unwrap())); + .contains_key(&instruction1.clone().to_cid().unwrap())); assert_eq!(ran.len(), 1); - assert_eq!(scheduler.run.len(), 1); - assert_eq!(scheduler.resume_step, Some(1)); + assert_eq!(ctx.run.len(), 1); + assert_eq!(ctx.resume_step, Some(1)); } - #[tokio::test] - async fn initialize_task_scheduler_with_all_receipted_instruction() { + #[homestar_runtime_proc_macro::db_async_test] + fn initialize_task_scheduler_with_all_receipted_instruction() { + let settings = TestSettings::load(); let config = Resources::default(); let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); @@ -364,24 +429,29 @@ mod test { ) .unwrap(); - let settings = Settings::load().unwrap(); - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); + let db = MemoryDb::setup_connection_pool(&settings.node).unwrap(); let mut conn = db.conn().unwrap(); - - let rows_inserted = - test_utils::db::MemoryDb::store_receipts(vec![receipt1, receipt2], &mut conn).unwrap(); - + let rows_inserted = MemoryDb::store_receipts(vec![receipt1, receipt2], &mut conn).unwrap(); assert_eq!(2, rows_inserted); let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); + let workflow_cid = workflow.clone().to_cid().unwrap(); let workflow_settings = 
wf::Settings::default(); - let fetch_fn = |_rscs: Vec| { async { Ok(IndexMap::default()) } }.boxed(); + let fetch_fn = |_rscs: Vec| { + async { + let mut index_map = IndexMap::new(); + index_map.insert(Resource::Url(instruction1.resource().to_owned()), vec![]); + index_map.insert(Resource::Url(instruction2.resource().to_owned()), vec![]); + Ok(index_map) + } + .boxed_local() + }; - let (tx, mut _rx) = test_utils::event::setup_channel(settings); + let (tx, mut _rx) = test_utils::event::setup_event_channel(settings.node); - let scheduler = TaskScheduler::init( + let scheduler_ctx = TaskScheduler::init( workflow, + workflow_cid, workflow_settings.into(), tx.into(), &mut conn, @@ -390,21 +460,22 @@ mod test { .await .unwrap(); - let ran = scheduler.ran.as_ref().unwrap(); + let ctx = scheduler_ctx.scheduler; + let ran = ctx.ran.as_ref().unwrap(); - assert_eq!(scheduler.linkmap.read().await.len(), 1); - assert!(!scheduler + assert_eq!(ctx.linkmap.read().await.len(), 1); + assert!(!ctx .linkmap .read() .await - .contains_key(&instruction1.to_cid().unwrap())); - assert!(scheduler + .contains_key(&instruction1.clone().to_cid().unwrap())); + assert!(ctx .linkmap .read() .await - .contains_key(&instruction2.to_cid().unwrap())); + .contains_key(&instruction2.clone().to_cid().unwrap())); assert_eq!(ran.len(), 2); - assert!(scheduler.run.is_empty()); - assert_eq!(scheduler.resume_step, None); + assert!(ctx.run.is_empty()); + assert_eq!(ctx.resume_step, None); } } diff --git a/homestar-runtime/src/settings.rs b/homestar-runtime/src/settings.rs index a569c5b7..58c8b79e 100644 --- a/homestar-runtime/src/settings.rs +++ b/homestar-runtime/src/settings.rs @@ -1,23 +1,20 @@ //! Settings / Configuration. 
-use anyhow::{anyhow, Context}; use config::{Config, ConfigError, Environment, File}; use http::Uri; -use libp2p::{identity, identity::secp256k1}; -use rand::{Rng, SeedableRng}; -use sec1::der::Decode; -use serde::Deserialize; -use serde_with::{base64::Base64, serde_as, DisplayFromStr, DurationSeconds}; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr, DurationSeconds}; use std::{ - io::Read, net::{IpAddr, Ipv6Addr}, - path::{Path, PathBuf}, + path::PathBuf, time::Duration, }; -use tracing::info; + +mod pubkey_config; +pub(crate) use pubkey_config::PubkeyConfig; /// Application settings. -#[derive(Clone, Debug, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Settings { pub(crate) monitoring: Monitoring, pub(crate) node: Node, @@ -36,7 +33,7 @@ impl Settings { } /// Process monitoring settings. -#[derive(Clone, Debug, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Monitoring { /// Monitoring collection interval. #[allow(dead_code)] @@ -45,12 +42,19 @@ pub struct Monitoring { /// Server settings. #[serde_as] -#[derive(Clone, Debug, Default, Deserialize, PartialEq)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] pub struct Node { + /// Network settings. #[serde(default)] pub(crate) network: Network, + /// Database settings. #[serde(default)] pub(crate) db: Database, + /// Garbage collection interval. + #[serde_as(as = "DurationSeconds")] + #[serde(default = "default_gc_interval")] + pub(crate) gc_interval: Duration, + /// Shutdown timeout. #[serde_as(as = "DurationSeconds")] #[serde(default = "default_shutdown_timeout")] pub(crate) shutdown_timeout: Duration, @@ -58,7 +62,7 @@ pub struct Node { /// Network-related settings for a homestar node. 
#[serde_as] -#[derive(Clone, Debug, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(default)] pub struct Network { /// Buffer-length for event(s) / command(s) channels. @@ -89,6 +93,9 @@ pub struct Network { pub(crate) rpc_max_connections: usize, /// RPC-server port. pub(crate) rpc_port: u16, + #[serde_as(as = "DurationSeconds")] + /// RPC-server timeout. + pub(crate) rpc_server_timeout: Duration, /// Transport connection timeout. #[serde_as(as = "DurationSeconds")] pub(crate) transport_connection_timeout: Duration, @@ -109,59 +116,28 @@ pub struct Network { } /// Database-related settings for a homestar node. -#[derive(Clone, Debug, Deserialize, PartialEq)] +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(default)] pub(crate) struct Database { + /// Database Url provided within the configuration file. + /// + /// Note: This is not used if the `DATABASE_URL` environment variable + /// is set. + #[serde_as(as = "Option")] + pub(crate) url: Option, /// Maximum number of connections managed by the [pool]. /// /// [pool]: crate::db::Pool pub(crate) max_pool_size: u32, } -#[derive(Clone, Debug, Deserialize, PartialEq)] -/// Configure how the Network keypair is generated or using an existing one -pub(crate) enum PubkeyConfig { - #[serde(rename = "random")] - Random, - /// Seed string should be a base64 encoded 32 bytes. This is used as the RNG seed to generate a ed25519 key. 
- #[serde(rename = "random_seed")] - GenerateFromSeed(PupkeyRNGSeed), - /// File path to a PEM encoded ed25519 key - #[serde(rename = "existing")] - Existing(ExistingKeyPath), -} - -/// Supported key types of homestar -#[derive(Clone, Debug, Default, Deserialize, PartialEq)] -pub(crate) enum KeyType { - #[default] - #[serde(rename = "ed25519")] - Ed25519, - #[serde(rename = "secp256k1")] - Secp256k1, -} - -/// Seed material for RNG generated keys -#[serde_as] -#[derive(Clone, Debug, Deserialize, PartialEq)] -pub(crate) struct PupkeyRNGSeed { - #[serde(default)] - key_type: KeyType, - #[serde_as(as = "Base64")] - seed: [u8; 32], -} - -/// Info on where and what the Key file is -#[derive(Clone, Debug, Deserialize, PartialEq)] -pub(crate) struct ExistingKeyPath { - #[serde(default)] - key_type: KeyType, - path: String, -} - impl Default for Database { fn default() -> Self { - Self { max_pool_size: 100 } + Self { + max_pool_size: 100, + url: None, + } } } @@ -178,6 +154,7 @@ impl Default for Network { rpc_host: IpAddr::V6(Ipv6Addr::LOCALHOST), rpc_max_connections: 10, rpc_port: 3030, + rpc_server_timeout: Duration::new(120, 0), transport_connection_timeout: Duration::new(20, 0), websocket_host: Uri::from_static("127.0.0.1"), websocket_port: 1337, @@ -203,83 +180,8 @@ fn default_shutdown_timeout() -> Duration { Duration::new(20, 0) } -impl PubkeyConfig { - /// Produce a Keypair using the given configuration. - /// Calling this function will access the filesystem if configured to import a key. 
- pub(crate) fn keypair(&self) -> anyhow::Result { - match self { - PubkeyConfig::Random => { - info!("generating random ed25519 key"); - Ok(identity::Keypair::generate_ed25519()) - } - PubkeyConfig::GenerateFromSeed(PupkeyRNGSeed { key_type, seed }) => { - // seed RNG with supplied seed - let mut r = rand::prelude::StdRng::from_seed(*seed); - let mut new_key: [u8; 32] = r.gen(); - - match key_type { - KeyType::Ed25519 => { - info!("generating radom ed25519 key from seed"); - - identity::Keypair::ed25519_from_bytes(new_key).map_err(|e| { - anyhow!("failed to generate ed25519 key from random: {:?}", e) - }) - } - KeyType::Secp256k1 => { - info!("generating radom secp256k1 key from seed"); - - let sk = - secp256k1::SecretKey::try_from_bytes(&mut new_key).map_err(|e| { - anyhow!("failed to generate secp256k1 key from random: {:?}", e) - })?; - let kp = secp256k1::Keypair::from(sk); - Ok(identity::Keypair::from(kp)) - } - } - } - PubkeyConfig::Existing(ExistingKeyPath { key_type, path }) => { - let path = Path::new(&path); - let mut file = std::fs::File::open(path).context("unable to read key file")?; - - let mut buf = Vec::new(); - file.read_to_end(&mut buf) - .context("unable to read bytes from file, is the file corrupted?")?; - - match key_type { - KeyType::Ed25519 => { - const PEM_HEADER: &str = "PRIVATE KEY"; - - info!("importing ed25519 key from: {}", path.display()); - - let (tag, mut key) = sec1::der::pem::decode_vec(&buf) - .map_err(|e| anyhow!("key file must be PEM formatted: {:#?}", e))?; - if tag != PEM_HEADER { - return Err(anyhow!("imported key file had a header of '{tag}', expected '{PEM_HEADER}' for ed25519")); - } - - // raw bytes of ed25519 secret key from PEM file - identity::Keypair::ed25519_from_bytes(&mut key) - .with_context(|| "imported key material was invalid for ed25519") - } - KeyType::Secp256k1 => { - info!("importing secp256k1 key from: {}", path.display()); - - let sk = match path.extension().and_then(|ext| ext.to_str()) { - Some("der") 
=> sec1::EcPrivateKey::from_der(buf.as_slice()).map_err(|e| anyhow!("failed to parse DER encoded secp256k1 key: {e:#?}")), - Some("pem") => { - Err(anyhow!("PEM encoded secp256k1 keys are unsupported at the moment. Please file an issue if you require this.")) - }, - _ => Err(anyhow!("please disambiguate file from either PEM or DER with a file extension.")) - }?; - let kp = secp256k1::SecretKey::try_from_bytes(sk.private_key.to_vec()) - .map(secp256k1::Keypair::from) - .map_err(|e| anyhow!("failed to import secp256k1 key: {:#?}", e))?; - Ok(identity::Keypair::from(kp)) - } - } - } - } - } +fn default_gc_interval() -> Duration { + Duration::new(1800, 0) } impl Settings { @@ -299,7 +201,11 @@ impl Settings { } /// Load settings from file string that must conform to a [PathBuf]. - pub fn load_from_file(file: String) -> Result { + pub fn load_from_file(file: F) -> Result + where + F: AsRef, + PathBuf: From, + { let path = PathBuf::from(file); Self::build(path) } @@ -331,6 +237,7 @@ mod test { let node_settings = settings.node; let default_settings = Node { + gc_interval: Duration::from_secs(1800), shutdown_timeout: Duration::from_secs(20), ..Default::default() }; @@ -345,6 +252,7 @@ mod test { let mut default_modded_settings = Node::default(); default_modded_settings.network.events_buffer_len = 1000; default_modded_settings.network.websocket_port = 9999; + default_modded_settings.gc_interval = Duration::from_secs(1800); default_modded_settings.shutdown_timeout = Duration::from_secs(20); assert_eq!(settings.node, default_modded_settings); diff --git a/homestar-runtime/src/settings/pubkey_config.rs b/homestar-runtime/src/settings/pubkey_config.rs new file mode 100644 index 00000000..dd1bdfea --- /dev/null +++ b/homestar-runtime/src/settings/pubkey_config.rs @@ -0,0 +1,128 @@ +use anyhow::{anyhow, Context}; +use libp2p::{identity, identity::secp256k1}; +use rand::{Rng, SeedableRng}; +use sec1::der::Decode; +use serde::{Deserialize, Serialize}; +use 
serde_with::{base64::Base64, serde_as}; +use std::{io::Read, path::Path}; +use tracing::info; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +/// Configure how the Network keypair is generated or using an existing one +pub(crate) enum PubkeyConfig { + #[serde(rename = "random")] + Random, + /// Seed string should be a base64 encoded 32 bytes. This is used as the RNG seed to generate a ed25519 key. + #[serde(rename = "random_seed")] + GenerateFromSeed(RNGSeed), + /// File path to a PEM encoded ed25519 key + #[serde(rename = "existing")] + Existing(ExistingKeyPath), +} + +/// Supported key types of homestar +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] +pub(crate) enum KeyType { + #[default] + #[serde(rename = "ed25519")] + Ed25519, + #[serde(rename = "secp256k1")] + Secp256k1, +} + +/// Seed material for RNG generated keys +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub(crate) struct RNGSeed { + #[serde(default)] + key_type: KeyType, + #[serde_as(as = "Base64")] + seed: [u8; 32], +} + +/// Info on where and what the Key file is +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +pub(crate) struct ExistingKeyPath { + #[serde(default)] + key_type: KeyType, + path: String, +} + +impl PubkeyConfig { + /// Produce a Keypair using the given configuration. + /// Calling this function will access the filesystem if configured to import a key. 
+ pub(crate) fn keypair(&self) -> anyhow::Result { + match self { + PubkeyConfig::Random => { + info!("generating random ed25519 key"); + Ok(identity::Keypair::generate_ed25519()) + } + PubkeyConfig::GenerateFromSeed(RNGSeed { key_type, seed }) => { + // seed RNG with supplied seed + let mut r = rand::prelude::StdRng::from_seed(*seed); + let mut new_key: [u8; 32] = r.gen(); + + match key_type { + KeyType::Ed25519 => { + info!("generating random ed25519 key from seed"); + + identity::Keypair::ed25519_from_bytes(new_key).map_err(|e| { + anyhow!("failed to generate ed25519 key from random: {:?}", e) + }) + } + KeyType::Secp256k1 => { + info!("generating random secp256k1 key from seed"); + + let sk = + secp256k1::SecretKey::try_from_bytes(&mut new_key).map_err(|e| { + anyhow!("failed to generate secp256k1 key from random: {:?}", e) + })?; + let kp = secp256k1::Keypair::from(sk); + Ok(identity::Keypair::from(kp)) + } + } + } + PubkeyConfig::Existing(ExistingKeyPath { key_type, path }) => { + let path = Path::new(&path); + let mut file = std::fs::File::open(path).context("unable to read key file")?; + + let mut buf = Vec::new(); + file.read_to_end(&mut buf) + .context("unable to read bytes from file, is the file corrupted?")?; + + match key_type { + KeyType::Ed25519 => { + const PEM_HEADER: &str = "PRIVATE KEY"; + + info!("importing ed25519 key from: {}", path.display()); + + let (tag, mut key) = sec1::der::pem::decode_vec(&buf) + .map_err(|e| anyhow!("key file must be PEM formatted: {:#?}", e))?; + if tag != PEM_HEADER { + return Err(anyhow!("imported key file had a header of '{tag}', expected '{PEM_HEADER}' for ed25519")); + } + + // raw bytes of ed25519 secret key from PEM file + identity::Keypair::ed25519_from_bytes(&mut key) + .with_context(|| "imported key material was invalid for ed25519") + } + KeyType::Secp256k1 => { + info!("importing secp256k1 key from: {}", path.display()); + + let sk = match path.extension().and_then(|ext| ext.to_str()) { + Some("der") => 
sec1::EcPrivateKey::from_der(buf.as_slice()).map_err(|e| anyhow!("failed to parse DER encoded secp256k1 key: {e:#?}")), + Some("pem") => { + Err(anyhow!("PEM encoded secp256k1 keys are unsupported at the moment. Please file an issue if you require this.")) + }, + _ => Err(anyhow!("please disambiguate file from either PEM or DER with a file extension.")) + }?; + let kp = secp256k1::SecretKey::try_from_bytes(sk.private_key.to_vec()) + .map(secp256k1::Keypair::from) + .map_err(|e| anyhow!("failed to import secp256k1 key: {:#?}", e))?; + Ok(identity::Keypair::from(kp)) + } + } + } + } + } +} diff --git a/homestar-runtime/src/test_utils/db.rs b/homestar-runtime/src/test_utils/db.rs index 51a3907e..4a7f3ecd 100644 --- a/homestar-runtime/src/test_utils/db.rs +++ b/homestar-runtime/src/test_utils/db.rs @@ -4,13 +4,16 @@ use crate::{ }; use anyhow::Result; use diesel::r2d2::{self, CustomizeConnection, ManageConnection}; -use std::sync::Arc; +use std::{env, sync::Arc}; const PRAGMAS: &str = " PRAGMA busy_timeout = 1000; -- sleep if the database is busy PRAGMA foreign_keys = ON; -- enforce foreign keys "; +/// Environment variable name for a test database URL. +pub(crate) const ENV: &str = "TEST_DATABASE_URL"; + /// Database connection options. #[derive(Debug, Clone)] struct ConnectionCustomizer; @@ -26,7 +29,7 @@ where /// Sqlite in-memory [Database] [Pool]. 
#[derive(Debug)] -pub struct MemoryDb(Arc); +pub(crate) struct MemoryDb(Arc); impl Clone for MemoryDb { fn clone(&self) -> Self { @@ -35,8 +38,19 @@ impl Clone for MemoryDb { } impl Database for MemoryDb { - fn setup_connection_pool(_settings: &settings::Node) -> Result { - let manager = r2d2::ConnectionManager::::new(":memory:"); + fn setup_connection_pool(settings: &settings::Node) -> Result { + let database_url = env::var(ENV).unwrap_or_else(|_| { + settings + .db + .url + .as_ref() + .map_or_else(|| "test.db".to_string(), |url| url.to_string()) + }); + + let manager = r2d2::ConnectionManager::::new(format!( + "file:{}?mode=memory&cache=shared", + database_url + )); // setup PRAGMAs manager @@ -44,7 +58,7 @@ impl Database for MemoryDb { .and_then(|mut conn| ConnectionCustomizer.on_acquire(&mut conn))?; let pool = r2d2::Pool::builder() - .max_size(1) + .max_size(3) .connection_customizer(Box::new(ConnectionCustomizer)) .build(manager) .expect("DATABASE_URL must be set to an SQLite DB file"); diff --git a/homestar-runtime/src/test_utils/event.rs b/homestar-runtime/src/test_utils/event.rs index 8b55861e..c18945b3 100644 --- a/homestar-runtime/src/test_utils/event.rs +++ b/homestar-runtime/src/test_utils/event.rs @@ -1,7 +1,16 @@ -use crate::{event_handler::Event, settings::Settings}; +use crate::{event_handler::Event, settings, worker::WorkerMessage}; use tokio::sync::mpsc; /// Create an [mpsc::Sender], [mpsc::Receiver] pair for [Event]s. -pub fn setup_channel(settings: Settings) -> (mpsc::Sender, mpsc::Receiver) { - mpsc::channel(settings.node.network.events_buffer_len) +pub(crate) fn setup_event_channel( + settings: settings::Node, +) -> (mpsc::Sender, mpsc::Receiver) { + mpsc::channel(settings.network.events_buffer_len) +} + +/// Create an [mpsc::Sender], [mpsc::Receiver] pair for worker messages. 
+pub(crate) fn setup_worker_channel( + settings: settings::Node, +) -> (mpsc::Sender, mpsc::Receiver) { + mpsc::channel(settings.network.events_buffer_len) } diff --git a/homestar-runtime/src/test_utils/mod.rs b/homestar-runtime/src/test_utils/mod.rs index 4be4e9e7..255c0289 100644 --- a/homestar-runtime/src/test_utils/mod.rs +++ b/homestar-runtime/src/test_utils/mod.rs @@ -1,6 +1,17 @@ -#[cfg(test)] -pub mod db; -#[cfg(test)] -pub mod event; -#[cfg(test)] -pub mod receipt; +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] +pub(crate) mod db; +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] +pub(crate) mod event; +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] +pub(crate) mod receipt; +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] +mod worker_builder; + +#[cfg(any(test, feature = "test-utils"))] +#[cfg_attr(docsrs, doc(cfg(feature = "test-utils")))] +#[allow(unused_imports)] +pub(crate) use worker_builder::WorkerBuilder; diff --git a/homestar-runtime/src/test_utils/proc_macro/Cargo.toml b/homestar-runtime/src/test_utils/proc_macro/Cargo.toml new file mode 100644 index 00000000..6852c581 --- /dev/null +++ b/homestar-runtime/src/test_utils/proc_macro/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "homestar-runtime-tests-proc-macro" +version = "0.0.0" +publish = false +edition = { workspace = true } +rust-version = { workspace = true } + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1.0" +quote = "1.0" +syn = "2.0" diff --git a/homestar-runtime/src/test_utils/proc_macro/src/lib.rs b/homestar-runtime/src/test_utils/proc_macro/src/lib.rs new file mode 100644 index 00000000..43fc1fe1 --- /dev/null +++ b/homestar-runtime/src/test_utils/proc_macro/src/lib.rs @@ -0,0 +1,113 @@ +//! Procedural macros for homestar-runtime testing. 
+ +use proc_macro::TokenStream; +use quote::quote; + +/// [Attribute macro] used for async tests that require a database to run +/// in parallel. +/// +/// This macro will wrap a function in a `tokio::test` attribute automatically +/// and amend the signature to be async. +/// +/// # Example +/// +/// ```ignore +/// #[homestar_runtime_proc_macro::db_async_test] +/// fn initialize_a_worker() { +/// // Injected by the macro +/// let settings = TestSettings::load(); +/// +/// let (tx, mut rx) = test_utils::event::setup_event_channel(settings.clone().node); +/// let builder = WorkerBuilder::new(settings.node).with_event_sender(tx.into()); +/// let db = builder.db(); +/// let worker = builder.build().await; +/// let running_tasks = Arc::new(DashMap::new()); +/// worker.run(running_tasks.clone()).await.unwrap(); +/// assert_eq!(running_tasks.len(), 1); +/// } +/// ``` +/// +/// [Attribute macro]: https://doc.rust-lang.org/reference/procedural-macros.html#attribute-macros +#[proc_macro_attribute] +pub fn db_async_test(_attr: TokenStream, item: TokenStream) -> TokenStream { + let func = syn::parse_macro_input!(item as syn::ItemFn); + let func_name = func.sig.ident; + let func_name_as_string = func_name.to_string(); + let body = func.block; + + quote! { + #[allow(clippy::needless_return)] + #[::tokio::test] + async fn #func_name() { + struct TestSettings; + impl TestSettings { + fn load() -> crate::Settings { + let mut settings = crate::Settings::load().unwrap(); + settings.node.db.url = Some(format!("{}.db", #func_name_as_string)); + settings + } + } + #body + } + } + .into() +} + +/// [Attribute macro] used for homestar-runtime-related tests that require a +/// database to run in parallel. +/// +/// This macro will wrap a function in a `#[test]` attribute automatically and +/// start a homestar-runtime instance with a temporary database. 
+/// +/// # Example +/// +/// ```ignore +/// #[homestar_runtime_proc_macro::runner_test] +/// fn spawn_an_rpc_server_and_ping_it() { +/// let TestRunner { runner, settings } = TestRunner::start(); +/// let (tx, _rx) = Runner::setup_channel(1); +/// let rpc_server = rpc::Server::new(settings.node.network(), tx.into()); +/// runner.runtime.block_on(rpc_server.spawn()).unwrap(); +/// runner.runtime.spawn(async move { +/// let addr = SocketAddr::new( +/// settings.node.network.rpc_host, +/// settings.node.network.rpc_port, +/// ); +/// let client = Client::new(addr, context::current()).await.unwrap(); +/// let response = client.ping().await.unwrap(); +/// assert_eq!(response, "pong".to_string()); +/// }); +/// } +/// ``` +/// +/// [Attribute macro]: +#[proc_macro_attribute] +pub fn runner_test(_attr: TokenStream, item: TokenStream) -> TokenStream { + let func = syn::parse_macro_input!(item as syn::ItemFn); + let func_name = func.sig.ident; + let func_name_as_string = func_name.to_string(); + let body = func.block; + + quote! 
{ + #[test] + fn #func_name() { + struct TestRunner { + runner: crate::Runner, + settings: crate::Settings, + } + impl TestRunner { + fn start() -> TestRunner { + let mut settings = crate::Settings::load().unwrap(); + settings.node.network.websocket_port = ::homestar_core::test_utils::ports::get_port() as u16; + settings.node.network.rpc_port = ::homestar_core::test_utils::ports::get_port() as u16; + settings.node.db.url = Some(format!("{}.db", #func_name_as_string)); + let db = crate::test_utils::db::MemoryDb::setup_connection_pool(&settings.node).unwrap(); + let runner = crate::Runner::start(settings.clone(), db).unwrap(); + TestRunner { runner, settings } + } + } + #body + } + } + .into() +} diff --git a/homestar-runtime/src/test_utils/receipt.rs b/homestar-runtime/src/test_utils/receipt.rs index ac1fa9a1..a37a7547 100644 --- a/homestar-runtime/src/test_utils/receipt.rs +++ b/homestar-runtime/src/test_utils/receipt.rs @@ -13,12 +13,14 @@ use libipld::{ Ipld, Link, }; +#[allow(dead_code)] const RAW: u64 = 0x55; /// Return both a `mocked` [Ucan Invocation Receipt] and a runtime [Receipt] /// /// [UCAN Invocation Receipt]: homestar_core::workflow::Receipt -pub fn receipts() -> (InvocationReceipt, Receipt) { +#[allow(dead_code)] +pub(crate) fn receipts() -> (InvocationReceipt, Receipt) { let h = Code::Blake3_256.digest(b"beep boop"); let cid = Cid::new_v1(RAW, h); let link: Link = Link::new(cid); diff --git a/homestar-runtime/src/test_utils/worker_builder.rs b/homestar-runtime/src/test_utils/worker_builder.rs new file mode 100644 index 00000000..a759d5e9 --- /dev/null +++ b/homestar-runtime/src/test_utils/worker_builder.rs @@ -0,0 +1,179 @@ +//! Module for building out [Worker]s for testing purposes. 
+ +use super::{db::MemoryDb, event}; +#[cfg(feature = "ipfs")] +use crate::network::IpfsCli; +use crate::{ + db::Database, event_handler::Event, settings, worker::WorkerMessage, workflow, Settings, Worker, +}; +use homestar_core::{ + ipld::DagCbor, + test_utils::workflow as workflow_test_utils, + workflow::{config::Resources, instruction::RunInstruction, prf::UcanPrf, Task}, + Workflow, +}; +use homestar_wasm::io::Arg; +use libipld::Cid; +use std::sync::Arc; +use tokio::sync::mpsc; + +#[cfg(feature = "ipfs")] +pub(crate) struct WorkerBuilder<'a> { + db: MemoryDb, + event_sender: Arc>, + runner_sender: mpsc::Sender, + ipfs: IpfsCli, + workflow: Workflow<'a, Arg>, + workflow_settings: workflow::Settings, +} + +#[cfg(not(feature = "ipfs"))] +pub(crate) struct WorkerBuilder<'a> { + db: MemoryDb, + event_sender: Arc>, + runner_sender: mpsc::Sender, + workflow: Workflow<'a, Arg>, + workflow_settings: workflow::Settings, +} + +impl<'a> WorkerBuilder<'a> { + /// Create a new, default instance of a builder to generate a test [Worker]. + #[cfg(feature = "ipfs")] + pub(crate) fn new(settings: settings::Node) -> Self { + let ipfs = IpfsCli::default(); + + let config = Resources::default(); + let (instruction1, instruction2, _) = + workflow_test_utils::related_wasm_instructions::(); + let task1 = Task::new( + RunInstruction::Expanded(instruction1), + config.clone().into(), + UcanPrf::default(), + ); + let task2 = Task::new( + RunInstruction::Expanded(instruction2), + config.into(), + UcanPrf::default(), + ); + + let (evt_tx, _rx) = event::setup_event_channel(settings.clone()); + let (wk_tx, _rx) = event::setup_worker_channel(settings.clone()); + Self { + db: MemoryDb::setup_connection_pool(&settings).unwrap(), + event_sender: evt_tx.into(), + runner_sender: wk_tx, + ipfs, + workflow: Workflow::new(vec![task1, task2]), + workflow_settings: workflow::Settings::default(), + } + } + + /// Create a new, default instance of a builder to generate a test [Worker]. 
+ #[cfg(not(feature = "ipfs"))] + pub(crate) fn new(settings: settings::Node) -> Self { + let config = Resources::default(); + let (instruction1, instruction2, _) = + workflow_test_utils::related_wasm_instructions::(); + let task1 = Task::new( + RunInstruction::Expanded(instruction1), + config.clone().into(), + UcanPrf::default(), + ); + let task2 = Task::new( + RunInstruction::Expanded(instruction2), + config.into(), + UcanPrf::default(), + ); + + let (evt_tx, _rx) = event::setup_event_channel(settings.clone()); + let (wk_tx, _rx) = event::setup_worker_channel(settings.clone()); + Self { + db: MemoryDb::setup_connection_pool(&settings).unwrap(), + event_sender: evt_tx.into(), + runner_sender: wk_tx, + workflow: Workflow::new(vec![task1, task2]), + workflow_settings: workflow::Settings::default(), + } + } + + /// Build a [Worker] from the current state of the builder. + #[cfg(feature = "ipfs")] + #[allow(dead_code)] + pub(crate) async fn build(self) -> Worker<'a, MemoryDb> { + Worker::new( + self.workflow, + self.workflow_settings, + self.event_sender, + self.runner_sender, + self.db, + self.ipfs, + ) + .await + .unwrap() + } + + /// Build a [Worker] from the current state of the builder. + #[cfg(not(feature = "ipfs"))] + #[allow(dead_code)] + pub(crate) async fn build(self) -> Worker<'a, MemoryDb> { + Worker::new( + self.workflow, + self.workflow_settings, + self.event_sender, + self.runner_sender, + self.db, + ) + .await + .unwrap() + } + + /// Get the [Cid] of the workflow from the builder state. + #[allow(dead_code)] + pub(crate) fn workflow_cid(&self) -> Cid { + self.workflow.clone().to_cid().unwrap() + } + + /// Get the length of the workflow from the builder state. + #[allow(dead_code)] + pub(crate) fn workflow_len(&self) -> u32 { + self.workflow.len() + } + + /// Get the in-memory [db] from the builder state. 
+ /// + /// [db]: MemoryDb + #[allow(dead_code)] + pub(crate) fn db(&self) -> MemoryDb { + self.db.clone() + } + + /// Build a [Worker] with a specific [Workflow] from a set of tasks. + /// + /// [tasks]: Task + #[allow(dead_code)] + pub(crate) fn with_tasks(mut self, tasks: Vec>) -> Self { + self.workflow = Workflow::new(tasks); + self + } + + /// Build a [Worker] with a specific Event [mpsc::Sender]. + #[allow(dead_code)] + pub(crate) fn with_event_sender(mut self, event_sender: Arc>) -> Self { + self.event_sender = event_sender; + self + } + + /// Build a [Worker] with a specific [workflow::Settings]. + #[allow(dead_code)] + pub(crate) fn with_workflow_settings(mut self, workflow_settings: workflow::Settings) -> Self { + self.workflow_settings = workflow_settings; + self + } +} + +impl Default for WorkerBuilder<'_> { + fn default() -> Self { + let settings = Settings::load().unwrap(); + Self::new(settings.node) + } +} diff --git a/homestar-runtime/src/worker.rs b/homestar-runtime/src/worker.rs index e083b207..8b3eec40 100644 --- a/homestar-runtime/src/worker.rs +++ b/homestar-runtime/src/worker.rs @@ -6,10 +6,10 @@ #[cfg(feature = "ipfs")] use crate::network::IpfsCli; -#[cfg(feature = "ipfs")] +#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] use crate::workflow::settings::BackoffStrategy; use crate::{ - db::{Connection, Database}, + db::Database, event_handler::{ channel::BoundedChannel, event::{Captured, QueryRecord}, @@ -24,11 +24,13 @@ use crate::{ Db, Receipt, }; use anyhow::{anyhow, Result}; +use chrono::NaiveDateTime; use futures::FutureExt; -#[cfg(feature = "ipfs")] +#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] use futures::StreamExt; use homestar_core::{ bail, + ipld::DagCbor, workflow::{ error::ResolveError, prf::UcanPrf, @@ -49,54 +51,86 @@ use tokio::{ task::JoinSet, }; use tracing::{debug, error}; -#[cfg(feature = "ipfs")] +#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] use 
tryhard::RetryFutureConfig; /// [JoinSet] of tasks run by a [Worker]. #[allow(dead_code)] pub(crate) type TaskSet = JoinSet>; +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum WorkerMessage { + Dropped(Cid), +} + /// Worker that operates over a given [TaskScheduler]. #[allow(dead_code)] -#[derive(Debug)] -pub(crate) struct Worker<'a> { +#[allow(missing_debug_implementations)] +pub(crate) struct Worker<'a, DB: Database> { pub(crate) scheduler: TaskScheduler<'a>, pub(crate) event_sender: Arc>, + pub(crate) runner_sender: mpsc::Sender, + pub(crate) db: DB, pub(crate) workflow_info: Arc, pub(crate) workflow_settings: Arc, + pub(crate) workflow_started: NaiveDateTime, } -impl<'a> Worker<'a> { +impl<'a, DB> Worker<'a, DB> +where + DB: Database + 'static, +{ /// Instantiate a new [Worker] for a [Workflow]. #[cfg(not(feature = "ipfs"))] #[allow(dead_code)] pub(crate) async fn new( workflow: Workflow<'a, Arg>, - workflow_info: Arc, - workflow_settings: Arc, + settings: workflow::Settings, event_sender: Arc>, - mut conn: Connection, - ) -> Result> { + runner_sender: mpsc::Sender, + db: DB, + ) -> Result> { + let p2p_timeout = settings.p2p_timeout; + let workflow_len = workflow.len(); + let workflow_settings = Arc::new(settings); let workflow_settings_scheduler = workflow_settings.clone(); let workflow_settings_worker = workflow_settings.clone(); + let fetch_fn = |rscs: Vec| { - async { Self::get_resources(rscs, workflow_settings).await }.boxed() + async { Self::get_resources(rscs, workflow_settings).await }.boxed_local() }; - let scheduler = TaskScheduler::init( + // Need to take ownership here to get the cid. 
+ let workflow_cid = workflow.to_owned().to_cid()?; + + let scheduler_ctx = TaskScheduler::init( workflow, + workflow_cid, workflow_settings_scheduler, event_sender.clone(), - &mut conn, + &mut db.conn()?, fetch_fn, ) .await?; + let (workflow_info, timestamp) = workflow::Info::init( + workflow_cid, + workflow_len, + scheduler_ctx.indexed_resources, + p2p_timeout, + event_sender.clone(), + db.conn()?, + ) + .await?; + Ok(Self { - scheduler, + scheduler: scheduler_ctx.scheduler, event_sender, - workflow_info, + runner_sender, + db, + workflow_info: workflow_info.into(), workflow_settings: workflow_settings_worker, + workflow_started: timestamp, }) } @@ -106,51 +140,79 @@ impl<'a> Worker<'a> { #[allow(dead_code)] pub(crate) async fn new( workflow: Workflow<'a, Arg>, - workflow_info: Arc, - workflow_settings: Arc, + settings: workflow::Settings, event_sender: Arc>, - mut conn: Connection, - ipfs: &'a IpfsCli, - ) -> Result> { + runner_sender: mpsc::Sender, + db: DB, + ipfs: IpfsCli, + ) -> Result> { + let p2p_timeout = settings.p2p_timeout; + let workflow_len = workflow.len(); + let workflow_settings = Arc::new(settings); let workflow_settings_scheduler = workflow_settings.clone(); let workflow_settings_worker = workflow_settings.clone(); + let fetch_fn = |rscs: Vec| { - async { Self::get_resources(rscs, workflow_settings, ipfs).await }.boxed() + async { Self::get_resources(rscs, workflow_settings, ipfs).await }.boxed_local() }; - let scheduler = TaskScheduler::init( + // Need to take ownership here to get the cid. 
+ let workflow_cid = workflow.to_owned().to_cid()?; + + let scheduler_ctx = TaskScheduler::init( workflow, + workflow_cid, workflow_settings_scheduler, event_sender.clone(), - &mut conn, + &mut db.conn()?, fetch_fn, ) .await?; + let (workflow_info, timestamp) = workflow::Info::init( + workflow_cid, + workflow_len, + scheduler_ctx.indexed_resources, + p2p_timeout, + event_sender.clone(), + db.conn()?, + ) + .await?; + Ok(Self { - scheduler, + scheduler: scheduler_ctx.scheduler, event_sender, - workflow_info, + runner_sender, + db, + workflow_info: workflow_info.into(), workflow_settings: workflow_settings_worker, + workflow_started: timestamp, }) } /// Run [Worker]'s tasks in task-queue with access to the [Db] object - /// to use a connection from the Database pool per run. - #[allow(dead_code)] - pub(crate) async fn run( - self, - db: impl Database + 'static, - running_tasks: &'a mut RunningTaskSet, - ) -> Result<()> { - self.run_queue(db, running_tasks).await + /// to use connections from the Database pool per run. + /// + /// This is the main entry point for running a workflow. + /// + /// Within this function, the [Worker] executes tasks and resolves + /// [Instruction] [Cid]s. + /// + /// [Instruction] [Cid]s being awaited on are resolved via 3 lookups: + /// * a check in the [LinkMap], which is an in-memory cache of resolved + /// [InstructionResult]s (this may have been pre-filled out by + /// scheduler initialization); + /// * a check in the database, which may have been updated at the point of + /// execution; + /// * a [Swarm]/DHT query to find the [Receipt] in the network. 
+ /// + /// [Instruction]: homestar_core::workflow::Instruction + /// [Swarm]: crate::network::swarm + pub(crate) async fn run(self, running_tasks: Arc) -> Result<()> { + self.run_queue(running_tasks).await } - async fn run_queue( - mut self, - db: impl Database + 'static, - running_tasks: &'a mut RunningTaskSet, - ) -> Result<()> { + async fn run_queue(mut self, running_tasks: Arc) -> Result<()> { async fn insert_into_map(map: Arc>>, key: Cid, value: T) where T: Clone, @@ -163,6 +225,7 @@ impl<'a> Worker<'a> { async fn resolve_cid( cid: Cid, + workflow_cid: Cid, workflow_settings: Arc, linkmap: Arc>>>, db: impl Database, @@ -171,7 +234,7 @@ impl<'a> Worker<'a> { if let Some(result) = linkmap.read().await.get(&cid) { Ok(result.to_owned()) } else { - match Db::find_instruction(Pointer::new(cid), &mut db.conn()?) { + match Db::find_instruction(cid, &mut db.conn()?) { Ok(found) => Ok(found.output_as_arg()), Err(_) => { debug!("no related instruction receipt found in the DB"); @@ -182,26 +245,30 @@ impl<'a> Worker<'a> { CapsuleTag::Receipt, tx, ))) - .map_err(|err| ResolveError::TransportError(err.to_string()))?; + .map_err(|err| ResolveError::Transport(err.to_string()))?; let found = match rx .recv_deadline(Instant::now() + workflow_settings.p2p_timeout) { Ok(ResponseEvent::Found(Ok(FoundEvent::Receipt(found)))) => found, Ok(ResponseEvent::Found(Err(err))) => { - bail!(ResolveError::UnresolvedCidError(format!( + bail!(ResolveError::UnresolvedCid(format!( "failure in attempting to find event: {err}" ))) } - Ok(_) => bail!(ResolveError::UnresolvedCidError( + Ok(_) => bail!(ResolveError::UnresolvedCid( "wrong or unexpected event message received".to_string(), )), - Err(err) => bail!(ResolveError::UnresolvedCidError(format!( + Err(err) => bail!(ResolveError::UnresolvedCid(format!( "timeout deadline reached for invocation receipt @ {cid}: {err}", ))), }; - let found_result = found.output_as_arg(); + let receipt = + Db::commit_receipt(workflow_cid, found.clone(), &mut 
db.conn()?) + .unwrap_or(found); + let found_result = receipt.output_as_arg(); + // Store the result in the linkmap for use in next iterations. insert_into_map(linkmap.clone(), cid, found_result.clone()).await; Ok(found_result) @@ -209,7 +276,8 @@ impl<'a> Worker<'a> { } } } - for batch in self.scheduler.run.into_iter() { + // Need to take ownership of the schedule + for batch in self.scheduler.run.clone().into_iter() { let (mut task_set, handles) = batch.into_iter().try_fold( (TaskSet::new(), vec![]), |(mut task_set, mut handles), node| { @@ -239,19 +307,22 @@ impl<'a> Worker<'a> { let state = State::default(); let mut wasm_ctx = WasmContext::new(state)?; - let db = db.clone(); + let db = self.db.clone(); let settings = self.workflow_settings.clone(); let linkmap = self.scheduler.linkmap.clone(); let event_sender = self.event_sender.clone(); + let workflow_cid = self.workflow_info.cid(); let resolved = args.resolve(move |cid| { - Box::pin(resolve_cid( + resolve_cid( cid, + workflow_cid, settings.clone(), linkmap.clone(), db.clone(), event_sender.clone(), - )) + ) + .boxed() }); let handle = task_set.spawn(async move { @@ -284,21 +355,19 @@ impl<'a> Worker<'a> { let invocation_receipt = InvocationReceipt::new( invocation_ptr, InstructionResult::Ok(output_to_store), - Ipld::Null, + meta, None, UcanPrf::default(), ); - let mut receipt = Receipt::try_with(instruction_ptr, &invocation_receipt)?; + let receipt = Receipt::try_with(instruction_ptr, &invocation_receipt)?; self.scheduler.linkmap.write().await.insert( Cid::try_from(receipt.instruction())?, receipt.output_as_arg(), ); - // set receipt metadata - receipt.set_meta(meta); // modify workflow info before progress update, in case - // that we timed out getting info from the network, but later + // that we time out getting info from the network, but later // recovered where we last started from. 
if let Some(step) = self.scheduler.resume_step { let current_progress_count = self.workflow_info.progress_count; @@ -306,67 +375,37 @@ impl<'a> Worker<'a> { .set_progress_count(std::cmp::max(current_progress_count, step as u32)) }; - let stored_receipt = Db::store_receipt(receipt, &mut db.conn()?)?; + let stored_receipt = + Db::commit_receipt(self.workflow_info.cid, receipt, &mut self.db.conn()?)?; // send internal event let (tx, _rx) = BoundedChannel::oneshot(); - self.event_sender - .try_send(Event::CapturedReceipt(Captured::with( + let _ = self + .event_sender + .send(Event::CapturedReceipt(Captured::with( stored_receipt, self.workflow_info.clone(), tx, - )))? + ))) + .await; } } Ok(()) } - #[cfg(feature = "ipfs")] + #[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] async fn get_resources( resources: Vec, settings: Arc, - ipfs: &'a IpfsCli, + ipfs: IpfsCli, ) -> Result>> { - /// TODO: http(s) calls - async fn fetch(rsc: Resource, client: IpfsCli) -> Result<(Resource, Result>)> { - match rsc { - Resource::Url(url) => { - let bytes = match (url.scheme(), url.domain(), url.path()) { - ("ipfs", Some(cid), _) => { - let cid = Cid::try_from(cid)?; - client.get_cid(cid).await - } - (_, Some("ipfs.io"), _) => client.get_resource(&url).await, - (_, _, path) if path.contains("/ipfs/") || path.contains("/ipns/") => { - client.get_resource(&url).await - } - (_, Some(domain), _) => { - let split: Vec<&str> = domain.splitn(3, '.').collect(); - // subdomain-gateway case: - // - if let (Ok(_cid), "ipfs") = (Cid::try_from(split[0]), split[1]) { - client.get_resource(&url).await - } else { - // TODO: reqwest call - todo!() - } - } - // TODO: reqwest call - (_, _, _) => todo!(), - }; - Ok((Resource::Url(url), bytes)) - } - - Resource::Cid(cid) => { - let bytes = client.get_cid(cid).await; - Ok((Resource::Cid(cid), bytes)) - } - } - } + use tokio::runtime::Handle; let num_requests = resources.len(); let settings = 
settings.as_ref(); - futures::stream::iter(resources.into_iter().map(|rsc| async move { + futures::stream::iter(resources.iter().map(|rsc| async { + let ipfs = ipfs.clone(); + let handle = Handle::current(); // Have to enumerate configs here, as type variants are different // and cannot be matched on. match settings.retry_backoff_strategy { @@ -374,7 +413,7 @@ impl<'a> Worker<'a> { tryhard::retry_fn(|| { let rsc = rsc.clone(); let client = ipfs.clone(); - tokio::spawn(async move { fetch(rsc, client).await }) + handle.spawn(async move { Self::fetch(rsc, client).await }) }) .with_config( RetryFutureConfig::new(settings.retries) @@ -387,7 +426,7 @@ impl<'a> Worker<'a> { tryhard::retry_fn(|| { let rsc = rsc.clone(); let client = ipfs.clone(); - tokio::spawn(async move { fetch(rsc, client).await }) + handle.spawn(async move { Self::fetch(rsc, client).await }) }) .with_config( RetryFutureConfig::new(settings.retries) @@ -400,7 +439,7 @@ impl<'a> Worker<'a> { tryhard::retry_fn(|| { let rsc = rsc.clone(); let client = ipfs.clone(); - tokio::spawn(async move { fetch(rsc, client).await }) + handle.spawn(async move { Self::fetch(rsc, client).await }) }) .with_config( RetryFutureConfig::new(settings.retries) @@ -413,7 +452,7 @@ impl<'a> Worker<'a> { tryhard::retry_fn(|| { let rsc = rsc.clone(); let client = ipfs.clone(); - tokio::spawn(async move { fetch(rsc, client).await }) + handle.spawn(async move { Self::fetch(rsc, client).await }) }) .with_config(RetryFutureConfig::new(settings.retries).no_backoff()) .await @@ -432,8 +471,47 @@ impl<'a> Worker<'a> { }) } + #[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] + #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] + async fn fetch(rsc: Resource, client: IpfsCli) -> Result<(Resource, Result>)> { + match rsc { + Resource::Url(url) => { + let bytes = match (url.scheme(), url.domain(), url.path()) { + ("ipfs", Some(cid), _) => { + let cid = Cid::try_from(cid)?; + client.get_cid(cid).await + } + (_, Some("ipfs.io"), 
_) => client.get_resource(&url).await, + (_, _, path) if path.contains("/ipfs/") || path.contains("/ipns/") => { + client.get_resource(&url).await + } + (_, Some(domain), _) => { + let split: Vec<&str> = domain.splitn(3, '.').collect(); + // subdomain-gateway case: + // + if let (Ok(_cid), "ipfs") = (Cid::try_from(split[0]), split[1]) { + client.get_resource(&url).await + } else { + // TODO: reqwest call + todo!() + } + } + // TODO: reqwest call + (_, _, _) => todo!(), + }; + Ok((Resource::Url(url), bytes)) + } + + Resource::Cid(cid) => { + let bytes = client.get_cid(cid).await; + Ok((Resource::Cid(cid), bytes)) + } + } + } + /// TODO: Client calls (only) over http(s). - #[cfg(not(feature = "ipfs"))] + #[cfg(all(not(feature = "ipfs"), not(test), not(feature = "test-utils")))] + #[doc(hidden)] #[allow(dead_code)] async fn get_resources( _resources: Vec, @@ -441,14 +519,68 @@ impl<'a> Worker<'a> { ) -> Result> { Ok(IndexMap::default()) } + + #[cfg(all(not(feature = "ipfs"), any(test, feature = "test-utils")))] + #[doc(hidden)] + #[allow(dead_code)] + async fn get_resources( + _resources: Vec, + _settings: Arc, + ) -> Result>> { + println!("Running in test mode"); + use crate::tasks::FileLoad; + let path = std::path::PathBuf::from(format!( + "{}/../homestar-wasm/fixtures/example_test.wasm", + env!("CARGO_MANIFEST_DIR") + )); + let bytes = WasmContext::load(path).await?; + let mut map = IndexMap::default(); + let rsc = "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy"; + map.insert(Resource::Url(url::Url::parse(rsc)?), bytes); + Ok(map) + } + + #[cfg(all(feature = "ipfs", any(test, feature = "test-utils")))] + #[doc(hidden)] + #[allow(dead_code)] + async fn get_resources( + _resources: Vec, + _settings: Arc, + _ipfs: IpfsCli, + ) -> Result>> { + println!("Running in test mode"); + use crate::tasks::FileLoad; + let path = std::path::PathBuf::from(format!( + "{}/../homestar-wasm/fixtures/example_test.wasm", + env!("CARGO_MANIFEST_DIR") + )); + let 
bytes = WasmContext::load(path).await?; + let mut map = IndexMap::default(); + let rsc = "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy"; + map.insert(Resource::Url(url::Url::parse(rsc)?), bytes); + Ok(map) + } +} + +impl<'a, DB> Drop for Worker<'a, DB> +where + DB: Database, +{ + fn drop(&mut self) { + let _ = self + .runner_sender + .try_send(WorkerMessage::Dropped(self.workflow_info.cid)); + } } #[cfg(test)] mod test { use super::*; - use crate::{db::Database, test_utils, workflow as wf, Settings}; - #[cfg(feature = "ipfs")] - use dashmap::DashMap; + use crate::{ + db::Database, + test_utils::{self, db::MemoryDb, WorkerBuilder}, + workflow::IndexedResources, + }; use homestar_core::{ ipld::DagCbor, test_utils::workflow as workflow_test_utils, @@ -457,69 +589,16 @@ mod test { }, }; - #[tokio::test] - async fn initialize_worker() { - let config = Resources::default(); - let (instruction1, instruction2, _) = - workflow_test_utils::related_wasm_instructions::(); - let task1 = Task::new( - RunInstruction::Expanded(instruction1), - config.clone().into(), - UcanPrf::default(), - ); - let task2 = Task::new( - RunInstruction::Expanded(instruction2), - config.into(), - UcanPrf::default(), - ); - - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); - let mut conn = db.conn().unwrap(); + #[homestar_runtime_proc_macro::db_async_test] + fn initialize_worker() { + let settings = TestSettings::load(); - let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); - let workflow_cid = workflow.clone().to_cid().unwrap(); - let workflow_settings = Arc::new(wf::Settings::default()); - let settings = Settings::load().unwrap(); + let (tx, mut rx) = test_utils::event::setup_event_channel(settings.clone().node); - #[cfg(feature = "ipfs")] - let (tx, mut rx) = test_utils::event::setup_channel(settings); - #[cfg(not(feature = "ipfs"))] - let (tx, mut _rx) = test_utils::event::setup_channel(settings); - - 
#[cfg(feature = "ipfs")] - let ipfs = IpfsCli::default(); - - let workflow_info = wf::Info::init( - workflow.clone(), - workflow_settings.p2p_timeout, - tx.clone().into(), - &mut conn, - ) - .await - .unwrap(); - - #[cfg(feature = "ipfs")] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings, - tx.into(), - conn, - &ipfs, - ) - .await - .unwrap(); - #[cfg(not(feature = "ipfs"))] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings.clone(), - tx.into(), - conn, - ) - .await - .unwrap(); + let builder = WorkerBuilder::new(settings.node).with_event_sender(tx.into()); + let db = builder.db(); + let worker = builder.build().await; + let workflow_cid = worker.workflow_info.cid; assert!(worker.scheduler.linkmap.read().await.is_empty()); assert!(worker.scheduler.ran.is_none()); @@ -527,97 +606,97 @@ mod test { assert_eq!(worker.scheduler.resume_step, None); assert_eq!(worker.workflow_info.cid, workflow_cid); assert_eq!(worker.workflow_info.num_tasks, 2); + assert_eq!(worker.workflow_info.resources.len(), 2); + assert_eq!( + worker + .workflow_info + .resources + .rscs() + .collect::>() + .len(), + 1 + ); - #[cfg(feature = "ipfs")] - { - let mut running_tasks = DashMap::new(); - let worker_workflow_cid = worker.workflow_info.cid; - worker.run(db.clone(), &mut running_tasks).await.unwrap(); - assert_eq!(running_tasks.len(), 1); - assert!(running_tasks.contains_key(&worker_workflow_cid)); - assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 2); - - // first time check DHT for workflow info - let workflow_info_event = rx.recv().await.unwrap(); - // we should have received 2 receipts - let next_run_receipt = rx.recv().await.unwrap(); - let next_next_run_receipt = rx.recv().await.unwrap(); - - match workflow_info_event { - Event::FindRecord(QueryRecord { cid, .. 
}) => assert_eq!(cid, worker_workflow_cid), - _ => panic!("Wrong event type"), - }; - - let (next_receipt, _wf_info) = match next_run_receipt { - Event::CapturedReceipt(Captured { - receipt: next_receipt, - .. - }) => { - let mut conn = db.conn().unwrap(); - let _ = Db::store_workflow_receipt(workflow_cid, next_receipt.cid(), &mut conn); - let mut info = workflow::Info::default(workflow_cid, 2); - info.increment_progress(next_receipt.cid()); - - (next_receipt, info) - } - _ => panic!("Wrong event type"), - }; - - let (_next_next_receipt, wf_info) = match next_next_run_receipt { - Event::CapturedReceipt(Captured { - receipt: next_next_receipt, - .. - }) => { - let mut conn = db.conn().unwrap(); - let _ = Db::store_workflow_receipt( - workflow_cid, - next_next_receipt.cid(), - &mut conn, - ); - let mut info = workflow::Info::default(workflow_cid, 2); - info.increment_progress(next_next_receipt.cid()); - - assert_ne!(next_next_receipt, next_receipt); - - (next_next_receipt, info) - } - _ => panic!("Wrong event type"), - }; + let running_tasks = Arc::new(RunningTaskSet::new()); + let worker_workflow_cid = worker.workflow_info.cid; + worker.run(running_tasks.clone()).await.unwrap(); + assert_eq!(running_tasks.len(), 1); + assert!(running_tasks.contains_key(&worker_workflow_cid)); + assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 2); - assert!(rx.recv().await.is_none()); + // first time check DHT for workflow info + let workflow_info_event = rx.recv().await.unwrap(); - let mut conn = db.conn().unwrap(); - let workflow_info = - test_utils::db::MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + // we should have received 2 receipts + let next_run_receipt = rx.recv().await.unwrap(); + let next_next_run_receipt = rx.recv().await.unwrap(); - assert_eq!(workflow_info.num_tasks, 2); - assert_eq!(workflow_info.cid, workflow_cid); - assert_eq!(workflow_info.progress.len(), 2); - assert_eq!(wf_info.progress_count, 2); - 
assert_eq!(wf_info.progress_count, workflow_info.progress_count); - } + match workflow_info_event { + Event::FindRecord(QueryRecord { cid, .. }) => assert_eq!(cid, worker_workflow_cid), + _ => panic!("Wrong event type"), + }; + + let (next_receipt, _wf_info) = match next_run_receipt { + Event::CapturedReceipt(Captured { + receipt: next_receipt, + .. + }) => { + let mut info = workflow::Info::default(workflow_cid, 2); + info.increment_progress(next_receipt.cid()); + + (next_receipt, info) + } + _ => panic!("Wrong event type"), + }; + + let (_next_next_receipt, wf_info) = match next_next_run_receipt { + Event::CapturedReceipt(Captured { + receipt: next_next_receipt, + .. + }) => { + let mut info = workflow::Info::default(workflow_cid, 2); + info.increment_progress(next_next_receipt.cid()); + + assert_ne!(next_next_receipt, next_receipt); + + (next_next_receipt, info) + } + _ => panic!("Wrong event type"), + }; + + assert!(rx.recv().await.is_none()); + + let mut conn = db.conn().unwrap(); + let workflow_info = MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + + assert_eq!(workflow_info.num_tasks, 2); + assert_eq!(workflow_info.cid, workflow_cid); + assert_eq!(workflow_info.progress.len(), 2); + assert_eq!(workflow_info.resources.len(), 2); + assert_eq!(wf_info.progress_count, 2); + assert_eq!(wf_info.progress_count, workflow_info.progress_count); } - #[tokio::test] + #[homestar_runtime_proc_macro::db_async_test] async fn initialize_worker_with_run_instructions_and_run() { + let settings = TestSettings::load(); + let config = Resources::default(); let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); + let task1 = Task::new( RunInstruction::Expanded(instruction1.clone()), config.clone().into(), UcanPrf::default(), ); + let task2 = Task::new( - RunInstruction::Expanded(instruction2), + RunInstruction::Expanded(instruction2.clone()), config.into(), UcanPrf::default(), ); - let db = 
test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); - let mut conn = db.conn().unwrap(); - let invocation_receipt = InvocationReceipt::new( Invocation::new(task1.clone()).try_into().unwrap(), InstructionResult::Ok(Ipld::Integer(4)), @@ -631,64 +710,39 @@ mod test { ) .unwrap(); - let _ = test_utils::db::MemoryDb::store_receipt(receipt.clone(), &mut conn).unwrap(); + let (tx, mut rx) = test_utils::event::setup_event_channel(settings.node.clone()); - let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); - let workflow_cid = workflow.clone().to_cid().unwrap(); - let workflow_settings = Arc::new(wf::Settings::default()); - let settings = Settings::load().unwrap(); + let builder = WorkerBuilder::new(settings.node) + .with_event_sender(tx.into()) + .with_tasks(vec![task1, task2]); + let db = builder.db(); + let workflow_cid = builder.workflow_cid(); - // already have stored workflow information (from a previous run) - let _ = test_utils::db::MemoryDb::store_workflow( - workflow::Stored::new(Pointer::new(workflow_cid), workflow.len() as i32), - &mut conn, - ) - .unwrap(); - let _ = test_utils::db::MemoryDb::store_workflow_receipt( - workflow_cid, - receipt.cid(), - &mut conn, - ) - .unwrap(); - - #[cfg(feature = "ipfs")] - let (tx, mut rx) = test_utils::event::setup_channel(settings); - #[cfg(not(feature = "ipfs"))] - let (tx, mut _rx) = test_utils::event::setup_channel(settings); - - #[cfg(feature = "ipfs")] - let ipfs = IpfsCli::default(); + let mut index_map = IndexMap::new(); + index_map.insert( + instruction1.clone().to_cid().unwrap(), + Resource::Url(instruction1.resource().to_owned()), + ); + index_map.insert( + instruction2.clone().to_cid().unwrap(), + Resource::Url(instruction2.resource().to_owned()), + ); - let workflow_info = wf::Info::init( - workflow.clone(), - workflow_settings.p2p_timeout, - tx.clone().into(), + let mut conn = db.conn().unwrap(); + let _ = MemoryDb::store_workflow( + 
workflow::Stored::new_with_resources( + Pointer::new(workflow_cid), + builder.workflow_len() as i32, + IndexedResources::new(index_map), + ), &mut conn, - ) - .await - .unwrap(); + ); + let _ = MemoryDb::commit_receipt(workflow_cid, receipt.clone(), &mut conn).unwrap(); - #[cfg(feature = "ipfs")] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings, - tx.into(), - conn, - &ipfs, - ) - .await - .unwrap(); - #[cfg(not(feature = "ipfs"))] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings.clone(), - tx.into(), - conn, - ) - .await - .unwrap(); + let worker = builder.build().await; + let info = MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + + assert_eq!(Arc::new(info), worker.workflow_info); assert_eq!(worker.scheduler.linkmap.read().await.len(), 1); assert!(worker @@ -702,52 +756,58 @@ mod test { assert_eq!(worker.scheduler.resume_step, Some(1)); assert_eq!(worker.workflow_info.cid, workflow_cid); assert_eq!(worker.workflow_info.num_tasks, 2); + assert_eq!(worker.workflow_info.resources.len(), 2); + assert_eq!( + worker + .workflow_info + .resources + .rscs() + .collect::>() + .len(), + 1 + ); - #[cfg(feature = "ipfs")] - { - let mut running_tasks = DashMap::new(); - let worker_workflow_cid = worker.workflow_info.cid; - worker.run(db.clone(), &mut running_tasks).await.unwrap(); - assert_eq!(running_tasks.len(), 1); - assert!(running_tasks.contains_key(&worker_workflow_cid)); - assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 1); - - // we should have received 1 receipt - let next_run_receipt = rx.recv().await.unwrap(); - - let (_next_receipt, wf_info) = match next_run_receipt { - Event::CapturedReceipt(Captured { - receipt: next_receipt, - .. 
- }) => { - let mut conn = db.conn().unwrap(); - let _ = Db::store_workflow_receipt(workflow_cid, next_receipt.cid(), &mut conn); - let mut info = workflow::Info::default(workflow_cid, 2); - info.increment_progress(next_receipt.cid()); - - assert_ne!(next_receipt, receipt); - - (next_receipt, info) - } - _ => panic!("Wrong event type"), - }; + let running_tasks = Arc::new(RunningTaskSet::new()); + let worker_workflow_cid = worker.workflow_info.cid; + worker.run(running_tasks.clone()).await.unwrap(); + assert_eq!(running_tasks.len(), 1); + assert!(running_tasks.contains_key(&worker_workflow_cid)); + assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 1); - assert!(rx.recv().await.is_none()); + // we should have received 1 receipt + let next_run_receipt = rx.recv().await.unwrap(); - let mut conn = db.conn().unwrap(); - let workflow_info = - test_utils::db::MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + let (_next_receipt, wf_info) = match next_run_receipt { + Event::CapturedReceipt(Captured { + receipt: next_receipt, + .. 
+ }) => { + let mut info = workflow::Info::default(workflow_cid, 2); + info.increment_progress(next_receipt.cid()); - assert_eq!(workflow_info.num_tasks, 2); - assert_eq!(workflow_info.cid, workflow_cid); - assert_eq!(workflow_info.progress.len(), 2); - assert_eq!(wf_info.progress_count, 2); - assert_eq!(wf_info.progress_count, workflow_info.progress_count); - } + assert_ne!(next_receipt, receipt); + + (next_receipt, info) + } + _ => panic!("Wrong event type"), + }; + + assert!(rx.recv().await.is_none()); + + let mut conn = db.conn().unwrap(); + let workflow_info = MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + + assert_eq!(workflow_info.num_tasks, 2); + assert_eq!(workflow_info.cid, workflow_cid); + assert_eq!(workflow_info.progress.len(), 2); + assert_eq!(wf_info.progress_count, 2); + assert_eq!(wf_info.progress_count, workflow_info.progress_count); } - #[tokio::test] - async fn initialize_wroker_with_all_receipted_instruction() { + #[homestar_runtime_proc_macro::db_async_test] + fn initialize_worker_with_all_receipted_instruction() { + let settings = TestSettings::load(); + let config = Resources::default(); let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); @@ -790,77 +850,42 @@ mod test { ) .unwrap(); - let db = test_utils::db::MemoryDb::setup_connection_pool(Settings::load().unwrap().node()) - .unwrap(); - let mut conn = db.conn().unwrap(); - - let rows_inserted = test_utils::db::MemoryDb::store_receipts( - vec![receipt1.clone(), receipt2.clone()], - &mut conn, - ) - .unwrap(); + let (tx, mut rx) = test_utils::event::setup_event_channel(settings.node.clone()); - assert_eq!(2, rows_inserted); + let builder = WorkerBuilder::new(settings.node) + .with_event_sender(tx.into()) + .with_tasks(vec![task1, task2]); + let db = builder.db(); + let workflow_cid = builder.workflow_cid(); - let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); - let workflow_cid = workflow.clone().to_cid().unwrap(); - let 
workflow_settings = Arc::new(wf::Settings::default()); - let settings = Settings::load().unwrap(); + let mut index_map = IndexMap::new(); + index_map.insert( + instruction1.clone().to_cid().unwrap(), + Resource::Url(instruction1.resource().to_owned()), + ); + index_map.insert( + instruction2.clone().to_cid().unwrap(), + Resource::Url(instruction2.resource().to_owned()), + ); - // already have stored workflow information (from a previous run) - let _ = test_utils::db::MemoryDb::store_workflow( - workflow::Stored::new(Pointer::new(workflow_cid), workflow.len() as i32), - &mut conn, - ) - .unwrap(); - let _ = test_utils::db::MemoryDb::store_workflow_receipt( - workflow_cid, - receipt1.cid(), - &mut conn, - ) - .unwrap(); - let _ = test_utils::db::MemoryDb::store_workflow_receipt( - workflow_cid, - receipt2.cid(), + let mut conn = db.conn().unwrap(); + let _ = MemoryDb::store_workflow( + workflow::Stored::new_with_resources( + Pointer::new(workflow_cid), + builder.workflow_len() as i32, + IndexedResources::new(index_map), + ), &mut conn, - ) - .unwrap(); + ); - let (tx, mut rx) = test_utils::event::setup_channel(settings); + let rows_inserted = + MemoryDb::store_receipts(vec![receipt1.clone(), receipt2.clone()], &mut conn).unwrap(); + assert_eq!(2, rows_inserted); - #[cfg(feature = "ipfs")] - let ipfs = IpfsCli::default(); + let _ = MemoryDb::store_workflow_receipt(workflow_cid, receipt1.cid(), &mut conn).unwrap(); + let _ = MemoryDb::store_workflow_receipt(workflow_cid, receipt2.cid(), &mut conn).unwrap(); - let workflow_info = wf::Info::init( - workflow.clone(), - workflow_settings.p2p_timeout, - tx.clone().into(), - &mut conn, - ) - .await - .unwrap(); - - #[cfg(feature = "ipfs")] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings, - tx.into(), - conn, - &ipfs, - ) - .await - .unwrap(); - #[cfg(not(feature = "ipfs"))] - let worker = Worker::new( - workflow, - workflow_info.into(), - workflow_settings, - tx.into(), - conn, - ) - 
.await - .unwrap(); + let worker = builder.build().await; assert_eq!(worker.scheduler.linkmap.read().await.len(), 1); assert!(!worker @@ -880,14 +905,24 @@ mod test { assert_eq!(worker.scheduler.resume_step, None); assert_eq!(worker.workflow_info.cid, workflow_cid); assert_eq!(worker.workflow_info.num_tasks, 2); + assert_eq!(worker.workflow_info.resources.len(), 2); + assert_eq!( + worker + .workflow_info + .resources + .rscs() + .collect::>() + .len(), + 1 + ); let mut conn = db.conn().unwrap(); - let workflow_info = - test_utils::db::MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); + let workflow_info = MemoryDb::get_workflow_info(workflow_cid, &mut conn).unwrap(); assert_eq!(workflow_info.num_tasks, 2); assert_eq!(workflow_info.cid, workflow_cid); assert_eq!(workflow_info.progress.len(), 2); + assert!(rx.try_recv().is_err()) } } diff --git a/homestar-runtime/src/workflow.rs b/homestar-runtime/src/workflow.rs index 351ae197..8e467560 100644 --- a/homestar-runtime/src/workflow.rs +++ b/homestar-runtime/src/workflow.rs @@ -5,7 +5,16 @@ use crate::scheduler::ExecutionGraph; use anyhow::{anyhow, bail}; +use core::fmt; use dagga::{self, dot::DagLegend, Node}; +use diesel::{ + backend::Backend, + deserialize::{self, FromSql}, + serialize::{self, IsNull, Output, ToSql}, + sql_types::Binary, + sqlite::Sqlite, + AsExpression, FromSqlRow, +}; use homestar_core::{ workflow::{ input::{Parse, Parsed}, @@ -16,8 +25,11 @@ use homestar_core::{ }; use homestar_wasm::io::Arg; use indexmap::IndexMap; -use libipld::Cid; -use std::path::Path; +use itertools::Itertools; +use libipld::{cbor::DagCborCodec, cid::Cid, prelude::Codec, serde::from_ipld, Ipld}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, path::Path}; +use strum::AsRefStr; use url::Url; mod info; @@ -37,7 +49,7 @@ pub struct Builder<'a>(Workflow<'a, Arg>); /// being accessed. 
/// /// [URI]: -#[derive(Debug, Clone, Eq, Hash, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq, AsRefStr, Hash, Serialize, Deserialize)] #[allow(dead_code)] pub(crate) enum Resource { /// Resource fetched by [Url]. @@ -46,6 +58,15 @@ pub(crate) enum Resource { Cid(Cid), } +impl fmt::Display for Resource { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Resource::Cid(cid) => write!(f, "{}", cid), + Resource::Url(ref url) => write!(f, "{}", url), + } + } +} + /// Ahead-of-time (AOT) context object, which includes the given /// [Workflow] as a executable [Dag] (directed acyclic graph) and /// the [Task] resources retrieved through IPFS Client or the DHT directly @@ -56,7 +77,7 @@ pub(crate) enum Resource { #[derive(Debug, Clone)] pub(crate) struct AOTContext<'a> { dag: Dag<'a>, - resources: Vec, + indexed_resources: IndexedResources, } impl AOTContext<'static> { @@ -122,7 +143,7 @@ impl<'a> Builder<'a> { match aot.dag.build_schedule() { Ok(schedule) => Ok(ExecutionGraph { schedule: schedule.batches, - resources: aot.resources, + indexed_resources: aot.indexed_resources, }), Err(e) => bail!("schedule could not be built from given workflow: {e}"), } @@ -133,9 +154,9 @@ impl<'a> Builder<'a> { let (dag, resources) = self.into_inner().tasks().into_iter().enumerate().try_fold( - (Dag::default(), vec![]), + (Dag::default(), IndexMap::new()), |(mut dag, mut resources), (i, task)| { - let instr_cid = task.instruction_cid()?.to_string(); + let instr_cid = task.instruction_cid()?; // Clone as we're owning the struct going backward. 
let ptr: Pointer = Invocation::::from(task.clone()).try_into()?; @@ -143,10 +164,9 @@ impl<'a> Builder<'a> { bail!("workflow tasks/instructions must be expanded / inlined") }; - // TODO: check if op is runnable on current node - // TODO LATER: check if op is registered on the network - - resources.push(Resource::Url(instr.resource().to_owned())); + resources + .entry(instr_cid) + .or_insert_with(|| Resource::Url(instr.resource().to_owned())); let parsed = instr.input().parse()?; let reads = parsed.args().deferreds().into_iter().fold( @@ -154,15 +174,15 @@ impl<'a> Builder<'a> { |mut in_flow_reads, cid| { if let Some(v) = lookup_table.get(&cid) { in_flow_reads.push(*v) - } else { - resources.push(Resource::Url(instr.resource().to_owned())); } + // TODO: else, it's a CID from another task outside + // of the workflow. in_flow_reads }, ); let node = Node::new(Vertex::new(instr.to_owned(), parsed, ptr)) - .with_name(instr_cid) + .with_name(instr_cid.to_string()) .with_result(i); dag.add_node(node.with_reads(reads)); @@ -170,7 +190,10 @@ impl<'a> Builder<'a> { }, )?; - Ok(AOTContext { dag, resources }) + Ok(AOTContext { + dag, + indexed_resources: IndexedResources(resources), + }) } /// Generate an [IndexMap] lookup table of task instruction CIDs to a @@ -187,6 +210,139 @@ impl<'a> Builder<'a> { } } +/// A container for [IndexMap]s from [Cid] => resource. +#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize, AsExpression, FromSqlRow)] +#[diesel(sql_type = Binary)] +pub struct IndexedResources(IndexMap); + +impl IndexedResources { + /// Create a new [IndexedResources] container from an [IndexMap] of + /// [Resource]s. + #[allow(dead_code)] + pub(crate) fn new(map: IndexMap) -> IndexedResources { + IndexedResources(map) + } + + /// Reutrn a referenced [IndexMap] of [Resource]s. + #[allow(dead_code)] + pub(crate) fn inner(&self) -> &IndexMap { + &self.0 + } + + /// Return an owned [IndexMap] of [Resource]s. 
+ #[allow(dead_code)] + pub(crate) fn into_inner(self) -> IndexMap { + self.0 + } + + /// Get length of [IndexedResources]. + #[allow(dead_code)] + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + /// Check if [IndexedResources] is empty. + #[allow(dead_code)] + pub(crate) fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Get a [Resource] by [Instruction] [Cid]. + /// + /// [Instruction]: homestar_core::workflow::Instruction + #[allow(dead_code)] + pub(crate) fn get(&self, cid: &Cid) -> Option<&Resource> { + self.0.get(cid) + } + + /// Iterate over all [Resource]s as references. + #[allow(dead_code)] + pub(crate) fn rscs(&self) -> impl Iterator { + self.0.values().dedup() + } + + /// Iterate over all [Resource]s. + #[allow(dead_code)] + pub(crate) fn into_rscs(self) -> impl Iterator { + self.0.into_values().dedup() + } +} + +impl From for Ipld { + fn from(resources: IndexedResources) -> Self { + let btreemap: BTreeMap = resources + .0 + .into_iter() + .map(|(k, v)| { + ( + k.to_string(), + match v { + Resource::Url(url) => Ipld::String(url.to_string()), + Resource::Cid(cid) => Ipld::Link(cid), + }, + ) + }) + .collect(); + Ipld::Map(btreemap) + } +} + +impl TryFrom for IndexedResources { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)? 
+ .into_iter() + .map(|(k, v)| { + let cid = Cid::try_from(k)?; + let resource = match v { + Ipld::String(url) => Resource::Url(Url::parse(&url)?), + Ipld::Link(cid) => Resource::Cid(cid), + _ => bail!("invalid resource type"), + }; + + Ok((cid, resource)) + }) + .collect::, anyhow::Error>>()?; + + Ok(IndexedResources(map)) + } +} + +impl TryFrom for Vec { + type Error = anyhow::Error; + + fn try_from(resources: IndexedResources) -> Result { + let ipld = Ipld::from(resources); + DagCborCodec.encode(&ipld) + } +} + +impl ToSql for IndexedResources +where + [u8]: ToSql, +{ + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Sqlite>) -> serialize::Result { + let bytes: Vec = self.to_owned().try_into()?; + out.set_value(bytes); + Ok(IsNull::No) + } +} + +impl FromSql for IndexedResources +where + DB: Backend, + *const [u8]: FromSql, +{ + fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result { + let raw_bytes = <*const [u8] as FromSql>::from_sql(bytes)?; + let raw_bytes: &[u8] = unsafe { &*raw_bytes }; + let ipld: Ipld = DagCborCodec.decode(raw_bytes)?; + let decoded: IndexedResources = ipld.try_into()?; + Ok(decoded) + } +} + #[cfg(test)] mod test { use super::*; @@ -245,7 +401,9 @@ mod test { let instr1 = task1.instruction_cid().unwrap().to_string(); let instr2 = task2.instruction_cid().unwrap().to_string(); - dagga::assert_batches(&[format!("{instr2}, {instr1}").as_str()], dag); + assert!(dag + .nodes() + .any(|node| node.name() == instr1 || node.name() == instr2)); } #[test] diff --git a/homestar-runtime/src/workflow/info.rs b/homestar-runtime/src/workflow/info.rs index 72b90037..61c8d498 100644 --- a/homestar-runtime/src/workflow/info.rs +++ b/homestar-runtime/src/workflow/info.rs @@ -1,3 +1,4 @@ +use super::IndexedResources; use crate::{ db::{Connection, Database}, event_handler::{ @@ -10,41 +11,86 @@ use crate::{ Db, Receipt, }; use anyhow::{anyhow, bail, Context, Result}; +use chrono::{NaiveDateTime, Utc}; use diesel::{Associations, Identifiable, 
Insertable, Queryable, Selectable}; -use homestar_core::{ipld::DagCbor, workflow::Pointer, Workflow}; -use homestar_wasm::io::Arg; +use homestar_core::{ipld::DagJson, workflow::Pointer}; use libipld::{cbor::DagCborCodec, prelude::Codec, serde::from_ipld, Cid, Ipld}; +use serde::{Deserialize, Serialize}; use std::{ collections::BTreeMap, sync::Arc, time::{Duration, Instant}, }; -use tokio::sync::mpsc; +use tokio::{runtime::Handle, sync::mpsc}; use tracing::info; /// [Workflow] header tag, for sharing workflow information over libp2p. /// -/// [Workflow]: Workflow +/// [Workflow]: homestar_core::Workflow pub const WORKFLOW_TAG: &str = "ipvm/workflow"; const CID_KEY: &str = "cid"; +const NUM_TASKS_KEY: &str = "num_tasks"; const PROGRESS_KEY: &str = "progress"; const PROGRESS_COUNT_KEY: &str = "progress_count"; -const NUM_TASKS_KEY: &str = "num_tasks"; +const RESOURCES_KEY: &str = "resources"; /// [Workflow] information stored in the database. /// /// [Workflow]: homestar_core::Workflow -#[derive(Debug, Clone, PartialEq, Queryable, Insertable, Identifiable, Selectable, Hash)] +#[derive(Debug, Clone, PartialEq, Queryable, Insertable, Identifiable, Selectable)] #[diesel(table_name = crate::db::schema::workflows, primary_key(cid))] pub struct Stored { pub(crate) cid: Pointer, pub(crate) num_tasks: i32, + pub(crate) resources: IndexedResources, + pub(crate) created_at: NaiveDateTime, + pub(crate) completed_at: Option, } impl Stored { - pub fn new(cid: Pointer, num_tasks: i32) -> Self { - Self { cid, num_tasks } + /// Create a new [Stored] workflow for the [db]. + /// + /// [db]: Database + pub fn new( + cid: Pointer, + num_tasks: i32, + resources: IndexedResources, + created_at: NaiveDateTime, + ) -> Self { + Self { + cid, + num_tasks, + resources, + created_at, + completed_at: None, + } + } + + /// Create a new [Stored] workflow for the [db] with a default timestamp. 
+ /// + /// [db]: Database + pub fn new_with_resources(cid: Pointer, num_tasks: i32, resources: IndexedResources) -> Self { + Self { + cid, + num_tasks, + resources, + created_at: Utc::now().naive_utc(), + completed_at: None, + } + } + + /// Create a default [Stored] workflow for the [db]. + /// + /// [db]: Database + pub fn default(cid: Pointer, num_tasks: i32) -> Self { + Self { + cid, + num_tasks, + resources: IndexedResources::default(), + created_at: Utc::now().naive_utc(), + completed_at: None, + } } } @@ -77,34 +123,49 @@ impl StoredReceipt { /// cid => [Info]. /// /// [Workflow]: homestar_core::Workflow -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Info { pub(crate) cid: Cid, + pub(crate) num_tasks: u32, pub(crate) progress: Vec, pub(crate) progress_count: u32, - pub(crate) num_tasks: u32, + pub(crate) resources: IndexedResources, } impl Info { - /// Create a new [Info] given a [Cid], progress / step, and number - /// of tasks. - pub fn new(cid: Cid, progress: Vec, num_tasks: u32) -> Self { + /// Create a new workflow set of [Info] given a [Cid], progress / step, + /// [IndexedResources], and number of tasks. + pub fn new(cid: Cid, num_tasks: u32, progress: Vec, resources: IndexedResources) -> Self { let progress_count = progress.len() as u32; Self { cid, + num_tasks, progress, progress_count, - num_tasks, + resources, } } - /// Create a default [Info] given a [Cid] and number of tasks. + /// Create a default workflow [Info] given a [Cid] and number of tasks. pub fn default(cid: Cid, num_tasks: u32) -> Self { Self { cid, + num_tasks, progress: vec![], progress_count: 0, + resources: IndexedResources::default(), + } + } + + /// Create a default workflow [Info] given a [Cid], number of tasks, + /// and [IndexedResources]. 
+ pub fn default_with_resources(cid: Cid, num_tasks: u32, resources: IndexedResources) -> Self { + Self { + cid, num_tasks, + progress: vec![], + progress_count: 0, + resources, } } @@ -130,8 +191,19 @@ impl Info { self.cid().to_bytes() } + /// Set map of [Instruction] [Cid]s to resources. + /// + /// [Instruction]: homestar_core::workflow::Instruction + #[allow(dead_code)] + pub(crate) fn set_resources(&mut self, resources: IndexedResources) { + self.resources = resources; + } + /// Set the progress / step of the [Workflow] completed, which /// may not be the same as the `progress` vector of [Cid]s. + /// + /// [Workflow]: homestar_core::Workflow + #[allow(dead_code)] pub(crate) fn set_progress_count(&mut self, progress_count: u32) { self.progress_count = progress_count; } @@ -164,43 +236,50 @@ impl Info { /// [Workflow], or return a default/new version of [Info] if none is found. /// /// [Gather]: Self::gather - #[allow(dead_code)] - pub(crate) async fn init<'a>( - workflow: Workflow<'a, Arg>, + /// [Workflow]: homestar_core::Workflow + pub(crate) async fn init( + workflow_cid: Cid, + workflow_len: u32, + resources: IndexedResources, p2p_timeout: Duration, event_sender: Arc>, - conn: &'a mut Connection, - ) -> Result { - let workflow_len = workflow.len(); - let workflow_cid = workflow.to_cid()?; - - let handle_timeout_fn = |workflow_cid, reused_conn: Option<&'a mut Connection>| { - let workflow_info = Self::default(workflow_cid, workflow_len); - // store workflow from info - - match reused_conn.and_then(|conn| { - Db::store_workflow( + mut conn: Connection, + ) -> Result<(Self, NaiveDateTime)> { + let timestamp = Utc::now().naive_utc(); + match Db::get_workflow_info(workflow_cid, &mut conn) { + Ok(info) => Ok((info, timestamp)), + Err(_err) => { + info!( + cid = workflow_cid.to_string(), + "workflow information not available in the database" + ); + let result = Db::store_workflow( Stored::new( - Pointer::new(workflow_info.cid), - workflow_info.num_tasks as i32, + 
Pointer::new(workflow_cid), + workflow_len as i32, + resources, + timestamp, ), - conn, - ) - .ok() - }) { - Some(_) => Ok(workflow_info), - None => bail!("failed to store workflow"), - } - }; + &mut conn, + )?; - Self::gather( - workflow_cid, - p2p_timeout, - event_sender, - Some(conn), - handle_timeout_fn, - ) - .await + let workflow_info = + Self::default_with_resources(workflow_cid, workflow_len, result.resources); + + // spawn a task to retrieve the workflow info from the + // network and store it in the database if it finds it. + let handle = Handle::current(); + handle.spawn(Self::retrieve_from_query( + workflow_cid, + p2p_timeout, + event_sender, + Some(conn), + None::) -> Result>, + )); + + Ok((workflow_info, timestamp)) + } + } } /// Gather available [Info] from the database or [libp2p] given a @@ -209,50 +288,9 @@ impl Info { workflow_cid: Cid, p2p_timeout: Duration, event_sender: Arc>, - mut conn: Option<&'a mut Connection>, - handle_timeout_fn: impl FnOnce(Cid, Option<&'a mut Connection>) -> Result, + mut conn: Option, + handle_timeout_fn: Option) -> Result>, ) -> Result { - async fn retrieve_from_query<'a>( - workflow_cid: Cid, - p2p_timeout: Duration, - event_sender: Arc>, - conn: Option<&'a mut Connection>, - handle_timeout_fn: impl FnOnce(Cid, Option<&'a mut Connection>) -> Result, - ) -> Result { - let (tx, rx) = BoundedChannel::oneshot(); - event_sender.try_send(Event::FindRecord(QueryRecord::with( - workflow_cid, - CapsuleTag::Workflow, - tx, - )))?; - - match rx.recv_deadline(Instant::now() + p2p_timeout) { - Ok(ResponseEvent::Found(Ok(FoundEvent::Workflow(workflow_info)))) => { - // store workflow from info - if let Some(conn) = conn { - Db::store_workflow( - Stored::new( - Pointer::new(workflow_info.cid), - workflow_info.num_tasks as i32, - ), - conn, - )?; - - Db::store_workflow_receipts(workflow_cid, &workflow_info.progress, conn)?; - } - - Ok(workflow_info) - } - Ok(ResponseEvent::Found(Err(err))) => { - bail!("failure in attempting to 
find event: {err}") - } - Ok(event) => { - bail!("received unexpected event {event:?} for workflow {workflow_cid}") - } - Err(err) => handle_timeout_fn(workflow_cid, conn).context(err), - } - } - let workflow_info = match conn .as_mut() .and_then(|conn| Db::get_workflow_info(workflow_cid, conn).ok()) @@ -264,7 +302,7 @@ impl Info { "workflow information not available in the database" ); - retrieve_from_query( + Self::retrieve_from_query( workflow_cid, p2p_timeout, event_sender, @@ -277,12 +315,52 @@ impl Info { Ok(workflow_info) } + + async fn retrieve_from_query<'a>( + workflow_cid: Cid, + p2p_timeout: Duration, + event_sender: Arc>, + conn: Option, + handle_timeout_fn: Option) -> Result>, + ) -> Result { + let (tx, rx) = BoundedChannel::oneshot(); + event_sender.try_send(Event::FindRecord(QueryRecord::with( + workflow_cid, + CapsuleTag::Workflow, + tx, + )))?; + + match rx.recv_deadline(Instant::now() + p2p_timeout) { + Ok(ResponseEvent::Found(Ok(FoundEvent::Workflow(workflow_info)))) => { + // store workflow receipts from info, as we've already stored + // the static information. 
+ if let Some(mut conn) = conn { + Db::store_workflow_receipts(workflow_cid, &workflow_info.progress, &mut conn)?; + } + + Ok(workflow_info) + } + Ok(ResponseEvent::Found(Err(err))) => { + bail!("failure in attempting to find event: {err}") + } + Ok(event) => { + bail!("received unexpected event {event:?} for workflow {workflow_cid}") + } + Err(err) => handle_timeout_fn + .map(|f| f(workflow_cid, conn).context(err)) + .unwrap_or(Err(err.into())), + } + } } impl From for Ipld { fn from(workflow: Info) -> Self { Ipld::Map(BTreeMap::from([ (CID_KEY.into(), Ipld::Link(workflow.cid)), + ( + NUM_TASKS_KEY.into(), + Ipld::Integer(workflow.num_tasks as i128), + ), ( PROGRESS_KEY.into(), Ipld::List(workflow.progress.into_iter().map(Ipld::Link).collect()), @@ -291,10 +369,7 @@ impl From for Ipld { PROGRESS_COUNT_KEY.into(), Ipld::Integer(workflow.progress_count as i128), ), - ( - NUM_TASKS_KEY.into(), - Ipld::Integer(workflow.num_tasks as i128), - ), + (RESOURCES_KEY.into(), Ipld::from(workflow.resources)), ])) } } @@ -309,6 +384,11 @@ impl TryFrom for Info { .ok_or_else(|| anyhow!("no `cid` set"))? .to_owned(), )?; + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("no `num_tasks` set"))? + .to_owned(), + )?; let progress = from_ipld( map.get(PROGRESS_KEY) .ok_or_else(|| anyhow!("no `progress` set"))? @@ -319,17 +399,18 @@ impl TryFrom for Info { .ok_or_else(|| anyhow!("no `progress_count` set"))? .to_owned(), )?; - let num_tasks = from_ipld( - map.get(NUM_TASKS_KEY) - .ok_or_else(|| anyhow!("no `num_tasks` set"))? + let resources = from_ipld( + map.get(RESOURCES_KEY) + .ok_or_else(|| anyhow!("no `resources` set"))? 
.to_owned(), )?; Ok(Self { cid, + num_tasks, progress, progress_count, - num_tasks, + resources, }) } } @@ -352,6 +433,8 @@ impl TryFrom> for Info { } } +impl DagJson for Info where Ipld: From {} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-runtime/src/workflow/settings.rs b/homestar-runtime/src/workflow/settings.rs index 27438803..4f6da295 100644 --- a/homestar-runtime/src/workflow/settings.rs +++ b/homestar-runtime/src/workflow/settings.rs @@ -13,9 +13,10 @@ pub struct Settings { pub(crate) retry_initial_delay: Duration, pub(crate) p2p_check_timeout: Duration, pub(crate) p2p_timeout: Duration, + pub(crate) timeout: Duration, } -#[cfg(not(test))] +#[cfg(all(not(test), not(feature = "test-utils")))] impl Default for Settings { fn default() -> Self { Self { @@ -25,11 +26,12 @@ impl Default for Settings { retry_initial_delay: Duration::from_millis(500), p2p_check_timeout: Duration::new(5, 0), p2p_timeout: Duration::new(60, 0), + timeout: Duration::new(3600, 0), } } } -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] impl Default for Settings { fn default() -> Self { Self { @@ -37,8 +39,9 @@ impl Default for Settings { retry_backoff_strategy: BackoffStrategy::Exponential, retry_max_delay: Duration::new(1, 0), retry_initial_delay: Duration::from_millis(50), - p2p_check_timeout: Duration::new(1, 0), - p2p_timeout: Duration::new(1, 0), + p2p_check_timeout: Duration::from_millis(10), + p2p_timeout: Duration::from_millis(10), + timeout: Duration::from_secs(120), } } } diff --git a/homestar-runtime/tests/cli.rs b/homestar-runtime/tests/cli.rs index bbe741b7..33cfadfb 100644 --- a/homestar-runtime/tests/cli.rs +++ b/homestar-runtime/tests/cli.rs @@ -182,8 +182,80 @@ fn test_server_serial() -> Result<()> { Ok(()) } +#[cfg(feature = "test-utils")] #[test] #[serial] +fn test_workflow_run_serial() -> Result<()> { + let _ = stop_bin(); + + let mut homestar_proc = Command::new(BIN.as_os_str()) + .arg("start") + .arg("--db") + .arg("homestar.db") + 
//.stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 3030); + let result = retry(Fixed::from_millis(500), || { + TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) + }); + + if result.is_err() { + homestar_proc.kill().unwrap(); + panic!("Homestar server/runtime failed to start in time"); + } + + Command::new(BIN.as_os_str()) + .arg("run") + .arg("-w") + .arg("./fixtures/test-workflow-add-one.json") + .assert() + .success() + .stdout(predicate::str::contains( + "bafyrmibcfltf6vhtfdson5z4av4r4wg3rccpt4hxajt54msacojeecazqy", + )) + .stdout(predicate::str::contains( + "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", + )) + .stdout(predicate::str::contains("num_tasks")) + .stdout(predicate::str::contains("progress_count")); + + // run another one of the same! + Command::new(BIN.as_os_str()) + .arg("run") + .arg("-w") + .arg("./fixtures/test-workflow-add-one.json") + .assert() + .success() + .stdout(predicate::str::contains( + "bafyrmibcfltf6vhtfdson5z4av4r4wg3rccpt4hxajt54msacojeecazqy", + )) + .stdout(predicate::str::contains( + "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", + )) + .stdout(predicate::str::contains("num_tasks")) + .stdout(predicate::str::contains("progress_count")); + + let _ = Command::new(BIN.as_os_str()).arg("stop").output(); + + if let Ok(None) = homestar_proc.try_wait() { + let _status_code = match homestar_proc.wait_timeout(Duration::from_secs(1)).unwrap() { + Some(status) => status.code(), + None => { + homestar_proc.kill().unwrap(); + homestar_proc.wait().unwrap().code() + } + }; + } + let _ = stop_bin(); + + Ok(()) +} + +#[test] +#[serial] +#[cfg(not(windows))] fn test_daemon_serial() -> Result<()> { let _ = stop_bin(); @@ -226,8 +298,13 @@ fn test_daemon_serial() -> Result<()> { .stdout(predicate::str::contains("pong")); let _result = signal::kill(Pid::from_raw(pid.try_into().unwrap()), Signal::SIGTERM); + let 
_result = retry(Fixed::from_millis(500), || { + Command::new(BIN.as_os_str()) + .arg("ping") + .assert() + .try_failure() + }); - Command::new(BIN.as_os_str()).arg("ping").assert().failure(); let _ = stop_bin(); Ok(()) diff --git a/homestar-wasm/Cargo.toml b/homestar-wasm/Cargo.toml index a15bea08..82c4d9c1 100644 --- a/homestar-wasm/Cargo.toml +++ b/homestar-wasm/Cargo.toml @@ -23,28 +23,26 @@ doctest = true # https://github.com/DevinR528/cargo-sort/issues/47 anyhow = { workspace = true } async-trait = { workspace = true } -atomic_refcell = "0.1" +atomic_refcell = { workspace = true } enum-as-inner = { workspace = true } heck = "0.4" homestar-core = { version = "0.1", path = "../homestar-core" } -itertools = "0.11" +itertools = { workspace = true } libipld = { workspace = true } rust_decimal = "1.31" stacker = "0.1" thiserror = { workspace = true } tracing = { workspace = true } -wasi-common = "10.0" +wasi-common = "11.0" wasmparser = "0.110" -#wasmtime = { version = "10.0", features = ["async", "component-model", "default"] } -wasmtime = { version = "10.0", features = ["async", "component-model", "default"] } +wasmtime = { version = "11.0", features = ["async", "component-model", "default"] } wasmtime-component-util = "11.0" -wasmtime-wasi = { version = "10.0", features = ["tokio"] } +wasmtime-wasi = { version = "11.0", features = ["tokio"] } wat = "1.0" wit-component = "0.13" [dev-dependencies] criterion = "0.5" -image = "0.24" serde_ipld_dagcbor = { workspace = true } tokio = { workspace = true } diff --git a/homestar-wasm/src/error.rs b/homestar-wasm/src/error.rs index 18a328d0..f8a948b0 100644 --- a/homestar-wasm/src/error.rs +++ b/homestar-wasm/src/error.rs @@ -11,39 +11,39 @@ pub enum InterpreterError { /// /// [Cid]: libipld::cid::Cid #[error("failed to encode CID: {0}")] - CidError(#[from] libipld::cid::Error), + CidEncode(#[from] libipld::cid::Error), /// Error converting from [Decimal] precision to [f64]. 
/// /// [Decimal]: rust_decimal::Decimal /// [f64]: f64 #[error("Failed to convert from decimal to f64 float {0}")] - DecimalToFloatError(rust_decimal::Decimal), + DecimalToFloat(rust_decimal::Decimal), /// Error converting from from [f32] to [Decimal]. /// /// [Decimal]: rust_decimal::Decimal #[error("Failed to convert from f32 float {0} to decimal")] - FloatToDecimalError(f32), + FloatToDecimal(f32), /// Error converting from [Ipld] structure. /// /// [Ipld]: libipld::Ipld #[error("cannot convert from Ipld structure: {0}")] - FromIpldError(#[from] libipld::error::SerdeError), + FromIpld(#[from] libipld::error::SerdeError), /// Error casting from [Ipld] [i128] structure to a lower precision integer. /// /// [Ipld]: libipld::Ipld #[error("failed to cast Ipld i128 to integer type: {0}")] - IpldToIntError(#[from] std::num::TryFromIntError), + IpldToInt(#[from] std::num::TryFromIntError), /// Error converting from [Ipld] structure to [Wit] structure. /// /// [Ipld]: libipld::Ipld /// [Wit]: wasmtime::component::Val #[error("no compatible Ipld type for Wit structure: {0:#?}")] - IpldToWitError(String), + IpldToWit(String), /// Error involving mismatches with [Ipld] maps. /// /// [Ipld]: libipld::Ipld #[error("{0}")] - MapTypeError(String), + MapType(String), /// Failure to match or find [Wit union] discriminant. /// /// [Wit union]: wasmtime::component::Union @@ -51,10 +51,10 @@ pub enum InterpreterError { NoDiscriminantMatched(String), /// Bubble-up [TagsError] errors while executing the interpreter. #[error(transparent)] - TagsError(#[from] TagsError), + Tags(#[from] TagsError), /// Type mismatch error between expected and given types. #[error("component type mismatch: expected: {expected}, found: {given:#?}")] - TypeMismatchError { + TypeMismatch { /// Expected type. expected: String, /// Given type or lack thereof. 
@@ -66,13 +66,13 @@ pub enum InterpreterError { /// The underlying error is a [anyhow::Error], per the /// [wasmtime::component::types::Type] implementation. #[error(transparent)] - WitTypeError(#[from] anyhow::Error), + WitType(#[from] anyhow::Error), /// Error converting from [Wit] structure to [Ipld] structure. /// /// [Ipld]: libipld::Ipld /// [Wit]: wasmtime::component::Val #[error("no compatible WIT type for Ipld structure: {0:#?}")] - WitToIpldError(libipld::Ipld), + WitToIpld(libipld::Ipld), } /// Error type for handling [Tags] stack structure. @@ -83,25 +83,25 @@ pub enum InterpreterError { pub enum TagsError { /// An error returned by [atomic_refcell::AtomicRefCell::try_borrow]. #[error("{0}")] - BorrowError(atomic_refcell::BorrowError), + Borrow(atomic_refcell::BorrowError), /// An error returned by [atomic_refcell::AtomicRefCell::try_borrow_mut]. #[error("{0}")] - BorrowMutError(atomic_refcell::BorrowMutError), + BorrowMut(atomic_refcell::BorrowMutError), /// Working with [Tags] stack structure should never be empty. 
/// /// [Tags]: crate::wasmtime::ipld::Tags #[error("structure must contain at least one element")] - TagsEmptyError, + TagsEmpty, } impl From for TagsError { fn from(e: atomic_refcell::BorrowError) -> Self { - TagsError::BorrowError(e) + TagsError::Borrow(e) } } impl From for TagsError { fn from(e: atomic_refcell::BorrowMutError) -> Self { - TagsError::BorrowMutError(e) + TagsError::BorrowMut(e) } } diff --git a/homestar-wasm/src/io.rs b/homestar-wasm/src/io.rs index 735e2cab..f0cb457a 100644 --- a/homestar-wasm/src/io.rs +++ b/homestar-wasm/src/io.rs @@ -61,11 +61,11 @@ impl input::Parse for Input { let map = from_ipld::>(ipld.to_owned())?; let func = map.get("func").ok_or_else(|| { - InputParseError::WorkflowError(WorkflowError::MissingFieldError("func".to_string())) + InputParseError::Workflow(WorkflowError::MissingField("func".to_string())) })?; let wasm_args = map.get("args").ok_or_else(|| { - InputParseError::WorkflowError(WorkflowError::MissingFieldError("args".to_string())) + InputParseError::Workflow(WorkflowError::MissingField("args".to_string())) })?; let args: Args = wasm_args.to_owned().try_into()?; diff --git a/homestar-wasm/src/wasmtime/error.rs b/homestar-wasm/src/wasmtime/error.rs index 217bedf2..e346464e 100644 --- a/homestar-wasm/src/wasmtime/error.rs +++ b/homestar-wasm/src/wasmtime/error.rs @@ -8,43 +8,43 @@ pub enum Error { /// Failure to convert from Wasm binary into Wasm component. #[error("cannot convert from binary structure to Wasm component")] - IntoWasmComponentError(#[source] anyhow::Error), + IntoWasmComponent(#[source] anyhow::Error), /// Bubble-up [ResolveError]s for [Cid]s still awaiting resolution. /// /// [ResolveError]: homestar_core::workflow::error::ResolveError /// [Cid]: libipld::Cid #[error(transparent)] - PromiseError(#[from] homestar_core::workflow::error::ResolveError), + ResolvePromise(#[from] homestar_core::workflow::error::ResolveError), /// Generic unknown error. 
#[error("unknown error")] - UnknownError, + Unknown, /// Failure to instantiate Wasm component and its host bindings. #[error("bindings not yet instantiated for wasm environment")] - WasmInstantiationError, + WasmInstantiation, /// Failure to parse Wasm binary. /// /// Transparently forwards from [wasmparser::BinaryReaderError]'s `source` /// and `Display` methods through to an underlying error. #[error(transparent)] - WasmParserError(#[from] wasmparser::BinaryReaderError), + WasmParser(#[from] wasmparser::BinaryReaderError), /// Generic [wasmtime] runtime error. /// /// Transparently forwards from [anyhow::Error]'s `source` and /// `Display` methods through to an underlying error. #[error(transparent)] - WasmRuntimeError(#[from] anyhow::Error), + WasmRuntime(#[from] anyhow::Error), /// Failure to find Wasm function for execution. #[error("Wasm function {0} not found")] - WasmFunctionNotFoundError(String), + WasmFunctionNotFound(String), /// [Wat] as Wasm component error. /// /// [Wat]: wat #[error("{0}")] - WatComponentError(String), + WatComponent(String), /// [wat]-related error. /// /// Transparently forwards from [wat::Error]'s `source` /// and `Display` methods through to an underlying error. #[error(transparent)] - WatError(#[from] wat::Error), + Wat(#[from] wat::Error), } diff --git a/homestar-wasm/src/wasmtime/ipld.rs b/homestar-wasm/src/wasmtime/ipld.rs index 25ba6de8..d5f0c942 100644 --- a/homestar-wasm/src/wasmtime/ipld.rs +++ b/homestar-wasm/src/wasmtime/ipld.rs @@ -122,7 +122,7 @@ impl Tags { fn pop(&self) -> Result { self.try_borrow_mut()? 
.pop_front() - .ok_or(TagsError::TagsEmptyError) + .ok_or(TagsError::TagsEmpty) } fn empty(&self) -> bool { @@ -184,18 +184,19 @@ impl RuntimeVal { Ipld::Map(mut v) if matches!(interface_ty.inner(), Some(Type::Union(_))) && v.len() == 1 => { - let inner = interface_ty.inner().ok_or_else(|| { - InterpreterError::TypeMismatchError { - expected: "".to_string(), - given: interface_ty.inner().map(|t| format!("{t:#?}")), - } - })?; + let inner = + interface_ty + .inner() + .ok_or_else(|| InterpreterError::TypeMismatch { + expected: "".to_string(), + given: interface_ty.inner().map(|t| format!("{t:#?}")), + })?; // already pattern matched against let union_inst = inner.unwrap_union(); let (key, elem) = v.pop_first().ok_or_else(|| { - InterpreterError::MapTypeError( + InterpreterError::MapType( "IPLD map must contain at least one discriminant".to_string(), ) })?; @@ -234,12 +235,13 @@ impl RuntimeVal { RuntimeVal::new_with_tags(discriminant?, tags) } v if matches!(interface_ty.inner(), Some(Type::Union(_))) => { - let inner = interface_ty.inner().ok_or_else(|| { - InterpreterError::TypeMismatchError { - expected: "".to_string(), - given: interface_ty.inner().map(|t| format!("{t:#?}")), - } - })?; + let inner = + interface_ty + .inner() + .ok_or_else(|| InterpreterError::TypeMismatch { + expected: "".to_string(), + given: interface_ty.inner().map(|t| format!("{t:#?}")), + })?; // already pattern matched against let union_inst = inner.unwrap_union(); @@ -280,13 +282,13 @@ impl RuntimeVal { InterfaceType::Type(Type::String) | InterfaceType::TypeRef(Type::String) | InterfaceType::Any => RuntimeVal::new(Val::String(Box::from("null"))), - _ => Err(InterpreterError::WitToIpldError(Ipld::Null))?, + _ => Err(InterpreterError::WitToIpld(Ipld::Null))?, }, Ipld::Bool(v) => match interface_ty { InterfaceType::Type(Type::Bool) | InterfaceType::TypeRef(Type::Bool) | InterfaceType::Any => RuntimeVal::new(Val::Bool(v)), - _ => Err(InterpreterError::WitToIpldError(Ipld::Bool(v)))?, + _ => 
Err(InterpreterError::WitToIpld(Ipld::Bool(v)))?, }, Ipld::Integer(v) => match interface_ty { InterfaceType::Type(Type::U8) | InterfaceType::TypeRef(Type::U8) => { @@ -313,7 +315,7 @@ impl RuntimeVal { InterfaceType::Any | InterfaceType::Type(Type::S64) | InterfaceType::TypeRef(Type::S64) => RuntimeVal::new(Val::S64(v.try_into()?)), - _ => Err(InterpreterError::WitToIpldError(Ipld::Integer(v)))?, + _ => Err(InterpreterError::WitToIpld(Ipld::Integer(v)))?, }, Ipld::Float(v) => match interface_ty { InterfaceType::Type(Type::Float32) | InterfaceType::TypeRef(Type::Float32) => { @@ -323,12 +325,13 @@ impl RuntimeVal { }, Ipld::String(v) => RuntimeVal::new(Val::String(Box::from(v))), Ipld::Bytes(v) if matches!(interface_ty.inner(), Some(Type::List(_))) => { - let inner = interface_ty.inner().ok_or_else(|| { - InterpreterError::TypeMismatchError { - expected: ">".to_string(), - given: interface_ty.inner().map(|t| format!("{t:#?}")), - } - })?; + let inner = + interface_ty + .inner() + .ok_or_else(|| InterpreterError::TypeMismatch { + expected: ">".to_string(), + given: interface_ty.inner().map(|t| format!("{t:#?}")), + })?; // already pattern matched against let list_inst = inner.unwrap_list(); @@ -351,12 +354,13 @@ impl RuntimeVal { ))), }, Ipld::List(v) if matches!(interface_ty.inner(), Some(Type::List(_))) => { - let inner = interface_ty.inner().ok_or_else(|| { - InterpreterError::TypeMismatchError { - expected: "".to_string(), - given: interface_ty.inner().map(|t| format!("{t:#?}")), - } - })?; + let inner = + interface_ty + .inner() + .ok_or_else(|| InterpreterError::TypeMismatch { + expected: "".to_string(), + given: interface_ty.inner().map(|t| format!("{t:#?}")), + })?; // already pattern matched against let list_inst = inner.unwrap_list(); @@ -383,30 +387,31 @@ impl RuntimeVal { .into_inner()?, Ipld::Map(v) => { - let inner = interface_ty.inner().ok_or_else(|| { - InterpreterError::TypeMismatchError { - expected: "".to_string(), - given: 
interface_ty.inner().map(|t| format!("{t:#?}")), - } - })?; + let inner = + interface_ty + .inner() + .ok_or_else(|| InterpreterError::TypeMismatch { + expected: "".to_string(), + given: interface_ty.inner().map(|t| format!("{t:#?}")), + })?; let list_inst = matches!(inner, Type::List(_)) .then_some(inner.unwrap_list()) - .ok_or_else(|| InterpreterError::TypeMismatchError { + .ok_or_else(|| InterpreterError::TypeMismatch { expected: "".to_string(), given: Some(format!("{inner:#?}")), })?; let tuple_inst = matches!(list_inst.ty(), Type::Tuple(_)) .then_some(list_inst.ty().unwrap_tuple()) - .ok_or_else(|| InterpreterError::TypeMismatchError { + .ok_or_else(|| InterpreterError::TypeMismatch { expected: "".to_string(), given: Some(format!("{inner:#?}")), })? .to_owned(); let ty = tuple_inst.types().nth(1).ok_or_else(|| { - InterpreterError::MapTypeError( + InterpreterError::MapType( "IPLD map must have tuples of two elements".to_string(), ) })?; @@ -475,11 +480,11 @@ impl TryFrom for Ipld { RuntimeVal(Val::Float32(v), _) => { // Convert to decimal for handling precision issues going from // f32 => f64. - let dec = Decimal::from_f32(v) - .ok_or_else(|| InterpreterError::FloatToDecimalError(v))?; + let dec = + Decimal::from_f32(v).ok_or_else(|| InterpreterError::FloatToDecimal(v))?; Ipld::Float( dec.to_f64() - .ok_or_else(|| InterpreterError::DecimalToFloatError(dec))?, + .ok_or_else(|| InterpreterError::DecimalToFloat(dec))?, ) } RuntimeVal(Val::Float64(v), _) => Ipld::Float(v), @@ -495,13 +500,13 @@ impl TryFrom for Ipld { acc.insert(s.to_string(), ipld); Ok::<_, Self::Error>(acc) } else { - Err(InterpreterError::TypeMismatchError { + Err(InterpreterError::TypeMismatch { expected: " of (, <&wasmtime::Val>)".to_string(), given: Some(format!("{tup_values:#?}")), })? } } else { - Err(InterpreterError::TypeMismatchError { + Err(InterpreterError::TypeMismatch { expected: "".to_string(), given: Some(format!("{elem:#?}")), })? 
@@ -516,7 +521,7 @@ impl TryFrom for Ipld { acc.push(v.to_owned()); Ok::<_, Self::Error>(acc) } else { - Err(InterpreterError::TypeMismatchError { + Err(InterpreterError::TypeMismatch { expected: "all types".to_string(), given: Some(format!("{elem:#?}")), })? @@ -546,7 +551,7 @@ impl TryFrom for Ipld { Ipld::try_from(RuntimeVal::new(u.payload().to_owned()))? } // Rest of Wit types are unhandled going to Ipld. - v => Err(InterpreterError::IpldToWitError(format!("{v:#?}")))?, + v => Err(InterpreterError::IpldToWit(format!("{v:#?}")))?, }; Ok(ipld) diff --git a/homestar-wasm/src/wasmtime/world.rs b/homestar-wasm/src/wasmtime/world.rs index d82aa745..41177265 100644 --- a/homestar-wasm/src/wasmtime/world.rs +++ b/homestar-wasm/src/wasmtime/world.rs @@ -100,13 +100,13 @@ impl Env { let param_types = self .bindings .as_mut() - .ok_or(Error::WasmInstantiationError)? + .ok_or(Error::WasmInstantiation)? .func() .params(&self.store); let result_types = self .bindings .as_mut() - .ok_or(Error::WasmInstantiationError)? + .ok_or(Error::WasmInstantiation)? .func() .results(&self.store); @@ -123,13 +123,13 @@ impl Env { .value(), Arg::Value(v) => v, }, - Input::Deferred(await_promise) => bail!(Error::PromiseError( - ResolveError::UnresolvedCidError(format!( + Input::Deferred(await_promise) => { + bail!(Error::ResolvePromise(ResolveError::UnresolvedCid(format!( "deferred task not yet resolved for {}: {}", await_promise.result(), await_promise.instruction_cid() - )) - )), + )))) + } }; acc.push(v); Ok::<_, Error>(acc) @@ -142,14 +142,14 @@ impl Env { self.bindings .as_mut() - .ok_or(Error::WasmInstantiationError)? + .ok_or(Error::WasmInstantiation)? .func() .call_async(&mut self.store, ¶ms, &mut results_alloc) .await?; self.bindings .as_mut() - .ok_or(Error::WasmInstantiationError)? + .ok_or(Error::WasmInstantiation)? 
.func() .post_return_async(&mut self.store) .await?; @@ -320,7 +320,7 @@ impl World { .func(fun_name) .or_else(|| __exports.func(&fun_name.to_kebab_case())) .or_else(|| __exports.func(&fun_name.to_snake_case())) - .ok_or_else(|| Error::WasmFunctionNotFoundError(fun_name.to_string()))?; + .ok_or_else(|| Error::WasmFunctionNotFound(fun_name.to_string()))?; Ok(World(func)) } @@ -343,21 +343,21 @@ fn component_from_bytes(bytes: &[u8], engine: Engine) -> Result { if is_component(chunk) { - Component::from_binary(&engine, bytes).map_err(Error::IntoWasmComponentError) + Component::from_binary(&engine, bytes).map_err(Error::IntoWasmComponent) } else { let component = ComponentEncoder::default() .module(bytes)? .validate(true) .encode()?; - Component::from_binary(&engine, &component).map_err(Error::IntoWasmComponentError) + Component::from_binary(&engine, &component).map_err(Error::IntoWasmComponent) } } Err(_) => { let wasm_bytes = wat::parse_bytes(bytes)?; if is_component(wasmparser::Parser::new(0).parse(&wasm_bytes, true)?) { - Component::from_binary(&engine, &wasm_bytes).map_err(Error::IntoWasmComponentError) + Component::from_binary(&engine, &wasm_bytes).map_err(Error::IntoWasmComponent) } else { - Err(Error::WatComponentError( + Err(Error::WatComponent( "WAT must reference a Wasm component.".to_string(), )) } diff --git a/homestar-wasm/tests/execute_wasm.rs b/homestar-wasm/tests/execute_wasm.rs index 375c4f4c..3d3e8062 100644 --- a/homestar-wasm/tests/execute_wasm.rs +++ b/homestar-wasm/tests/execute_wasm.rs @@ -30,7 +30,7 @@ async fn test_wasm_exceeds_max_memory() { ) .await; - if let Err(Error::WasmRuntimeError(err)) = env { + if let Err(Error::WasmRuntime(err)) = env { assert!(err.to_string().contains("exceeds memory limits")); } else { panic!("Expected WasmRuntimeError")