From a6b7f5bb929caeb5136cd59cea76acc90bb1a735 Mon Sep 17 00:00:00 2001
From: Samuel <39674930+sagojez@users.noreply.github.com>
Date: Thu, 19 Dec 2024 21:44:07 +0000
Subject: [PATCH] chore: remove event emitter package (#210)

---
 .github/workflows/build-emit.yaml             |   37 -
 .github/workflows/tests.yaml                  |    6 -
 Cargo.lock                                    | 1104 +----------------
 Cargo.toml                                    |    3 +-
 integrationos-api/src/domain/config.rs        |    5 -
 integrationos-api/src/logic/connection.rs     |    2 -
 integrationos-api/src/logic/openapi/mod.rs    |    2 +-
 integrationos-database/src/service/init.rs    |   39 +-
 integrationos-database/src/service/mod.rs     |   60 +-
 integrationos-database/tests/context.rs       |    1 -
 .../tests/http/connection.rs                  |    2 -
 integrationos-database/tests/http/signal.rs   |   27 +-
 .../src/domain/configuration/database.rs      |   11 -
 integrationos-domain/src/domain/error/mod.rs  |   73 +-
 .../src/domain/event/emitted_events.rs        |   21 -
 .../src/domain/pipeline/middleware.rs         |    2 +-
 integrationos-emit/Cargo.toml                 |   38 -
 integrationos-emit/Dockerfile                 |   10 -
 integrationos-emit/README.md                  |   39 -
 integrationos-emit/src/algebra/event.rs       |   61 -
 integrationos-emit/src/algebra/metrics.rs     |  103 --
 integrationos-emit/src/algebra/mod.rs         |    2 -
 integrationos-emit/src/domain/config.rs       |  266 ----
 .../src/domain/deduplication.rs               |   11 -
 integrationos-emit/src/domain/event.rs        |  131 --
 integrationos-emit/src/domain/idempotency.rs  |   38 -
 integrationos-emit/src/domain/mod.rs          |    4 -
 integrationos-emit/src/lib.rs                 |    7 -
 integrationos-emit/src/logic/emitter.rs       |  104 --
 integrationos-emit/src/logic/mod.rs           |    1 -
 integrationos-emit/src/main.rs                |   42 -
 .../src/middleware/idempotency.rs             |   34 -
 integrationos-emit/src/middleware/mod.rs      |    1 -
 integrationos-emit/src/router/emitter.rs      |   31 -
 integrationos-emit/src/router/metrics.rs      |    8 -
 integrationos-emit/src/router/mod.rs          |    2 -
 integrationos-emit/src/server.rs              |  206 ---
 .../src/stream/fluvio_driver.rs               |  585 ---------
 .../src/stream/logger_driver.rs               |   49 -
 integrationos-emit/src/stream/mod.rs          |   47 -
 integrationos-emit/src/stream/pusher.rs       |  213 ----
 integrationos-emit/src/stream/scheduler.rs    |  135 --
 integrationos-emit/tests/context.rs           |  178 ---
 integrationos-emit/tests/http/emitter.rs      |  140 ---
 integrationos-emit/tests/http/mod.rs          |    1 -
 integrationos-emit/tests/main.rs              |    2 -
 integrationos-emit/tests/resource/Dockerfile  |    8 -
 .../tests/resource/docker-compose.yml         |   47 -
 .../src/mongo_control_data_store.rs           |    3 +-
 49 files changed, 111 insertions(+), 3831 deletions(-)
 delete mode 100644 .github/workflows/build-emit.yaml
 delete mode 100644 integrationos-emit/Cargo.toml
 delete mode 100644 integrationos-emit/Dockerfile
 delete mode 100644 integrationos-emit/README.md
 delete mode 100644 integrationos-emit/src/algebra/event.rs
 delete mode 100644 integrationos-emit/src/algebra/metrics.rs
 delete mode 100644 integrationos-emit/src/algebra/mod.rs
 delete mode 100644 integrationos-emit/src/domain/config.rs
 delete mode 100644 integrationos-emit/src/domain/deduplication.rs
 delete mode 100644 integrationos-emit/src/domain/event.rs
 delete mode 100644 integrationos-emit/src/domain/idempotency.rs
 delete mode 100644 integrationos-emit/src/domain/mod.rs
 delete mode 100644 integrationos-emit/src/lib.rs
 delete mode 100644 integrationos-emit/src/logic/emitter.rs
 delete mode 100644 integrationos-emit/src/logic/mod.rs
 delete mode 100644 integrationos-emit/src/main.rs
 delete mode 100644 integrationos-emit/src/middleware/idempotency.rs
 delete mode 100644 integrationos-emit/src/middleware/mod.rs
 delete mode 100644 integrationos-emit/src/router/emitter.rs
 delete mode 100644 integrationos-emit/src/router/metrics.rs
 delete mode 100644 integrationos-emit/src/router/mod.rs
 delete mode 100644 integrationos-emit/src/server.rs
 delete mode 100644 integrationos-emit/src/stream/fluvio_driver.rs
 delete mode 100644 integrationos-emit/src/stream/logger_driver.rs
 delete mode 100644 integrationos-emit/src/stream/mod.rs
 delete mode 100644 integrationos-emit/src/stream/pusher.rs
 delete mode 100644 integrationos-emit/src/stream/scheduler.rs
 delete mode 100644 integrationos-emit/tests/context.rs
 delete mode 100644 integrationos-emit/tests/http/emitter.rs
 delete mode 100644 integrationos-emit/tests/http/mod.rs
 delete mode 100644 integrationos-emit/tests/main.rs
 delete mode 100644 integrationos-emit/tests/resource/Dockerfile
 delete mode 100644 integrationos-emit/tests/resource/docker-compose.yml

diff --git a/.github/workflows/build-emit.yaml b/.github/workflows/build-emit.yaml
deleted file mode 100644
index 82b7579c..00000000
--- a/.github/workflows/build-emit.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-on:
-  push:
-    branches:
-      - main
-    tags:
-      - "[0-9]+.[0-9]+.[0-9]+"
-    paths:
-      - .github/workflows/build-emit.yaml
-      - "integrationos-domain/**"
-      - "integrationos-emit/**"
-      - Cargo.lock
-      - Dockerfile.common
-      - integrationos-emit/Dockerfile
-
-env:
-  docker_image_tag: ${{ github.ref == 'refs/heads/main' && github.sha || github.ref_name }}
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    permissions:
-      contents: read
-      id-token: write
-
-    steps:
-      - uses: actions/checkout@v3
-      - uses: integration-os/google-artifact-registry-action@v2
-
-        with:
-          image: "us-docker.pkg.dev/integrationos/docker-oss/emit:${{ env.docker_image_tag }}"
-          service_account: github-actions@integrationos.iam.gserviceaccount.com
-          workload_identity_provider: projects/356173785332/locations/global/workloadIdentityPools/github-actions/providers/github-actions
-          file: integrationos-emit/Dockerfile
-          context: .
- build-args: | - "EXECUTABLE=integrationos-emit" diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 1375145b..2fb27a44 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -52,12 +52,6 @@ jobs: sudo rm -rf "/usr/local/share/boost" sudo rm -rf "$AGENT_TOOLSDIRECTORY" - uses: actions/checkout@v4 - - name: Starting up Docker 🐳 - run: ls && docker compose -f ./integrationos-emit/tests/resource/docker-compose.yml up -d - - name: Install fluvio CLI - run: curl -fsS https://hub.infinyon.cloud/install/install.sh | bash - - name: Create fluvio topic - run: ~/.fluvio/bin/fluvio profile add docker 127.0.0.1:9103 docker && ~/.fluvio/bin/fluvio topic create events && ~/.fluvio/bin/fluvio topic create dlq - name: Install protoc run: sudo apt-get update && sudo apt-get install -y protobuf-compiler - uses: dtolnay/rust-toolchain@stable diff --git a/Cargo.lock b/Cargo.lock index b7062251..575b6e67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,123 +132,23 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" dependencies = [ - "event-listener 5.3.1", + "event-listener", "event-listener-strategy", "futures-core", "pin-project-lite", ] -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" -dependencies = [ - "concurrent-queue", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-executor" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "slab", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.3.1", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - -[[package]] -name = "async-io" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" -dependencies = [ - "async-lock", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite", - "parking", - "polling", - "rustix", - "slab", - "tracing", - "windows-sys 0.59.0", -] - [[package]] name = "async-lock" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener", "event-listener-strategy", "pin-project-lite", ] -[[package]] -name = "async-net" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" -dependencies = [ - "async-io", - "blocking", - "futures-lite", -] - -[[package]] -name = 
"async-process" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" -dependencies = [ - "async-channel 2.3.1", - "async-io", - "async-lock", - "async-signal", - "async-task", - "blocking", - "cfg-if", - "event-listener 5.3.1", - "futures-lite", - "rustix", - "tracing", -] - [[package]] name = "async-recursion" version = "1.1.1" @@ -260,51 +160,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "async-signal" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" -dependencies = [ - "async-io", - "async-lock", - "atomic-waker", - "cfg-if", - "futures-core", - "futures-io", - "rustix", - "signal-hook-registry", - "slab", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-std" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" version = "0.3.6" @@ -327,12 +182,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "async-task" -version = "4.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" - [[package]] name = "async-trait" version = "0.1.83" @@ -344,17 +193,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", -] - [[package]] name = "atoi" version = "2.0.0" @@ -474,27 +312,6 @@ dependencies = [ "tower-http", ] -[[package]] -name = "axum-prometheus" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739e2585f5376f5bdd129324ded72d3261fdd5b7c411a645920328fb5dc875d4" -dependencies = [ - "axum", - "bytes", - "futures-core", - "http 1.1.0", - "http-body 1.0.1", - "matchit 0.7.3", - "metrics 0.23.0", - "metrics-exporter-prometheus 0.15.3", - "once_cell", - "pin-project", - "tokio", - "tower 0.4.13", - "tower-http", -] - [[package]] name = "backoff" version = "0.4.0" @@ -640,19 +457,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "blocking" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" -dependencies = [ - "async-channel 2.3.1", - "async-task", - "futures-io", - "futures-lite", - "piper", -] - [[package]] name = "bollard-stubs" version = "1.42.0-rc.3" @@ -707,12 +511,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "built" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" - [[package]] name = "bumpalo" version = "3.16.0" @@ -746,20 +544,6 @@ name = "bytemuck" version = "1.20.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" -dependencies = [ - "bytemuck_derive", -] - -[[package]] -name = "bytemuck_derive" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.89", -] [[package]] name = "byteorder" @@ -773,23 +557,12 @@ version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" -[[package]] -name = "bytesize" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" -dependencies = [ - "serde", -] - [[package]] name = "cc" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ - "jobserver", - "libc", "shlex", ] @@ -914,15 +687,6 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "content_inspector" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7bda66e858c683005a53a9a60c69a4aca7eeaa45d124526e389f7aec8e62f38" -dependencies = [ - "memchr", -] - [[package]] name = "convert_case" version = "0.4.0" @@ -994,15 +758,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "crc32c" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" -dependencies = [ - "rustc_version 0.4.1", -] - [[package]] name = "crc32fast" version = "1.4.2" @@ -1303,37 +1058,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive_builder" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" -dependencies = [ - "darling 0.20.10", - "proc-macro2", - "quote", - "syn 2.0.89", -] - -[[package]] -name = "derive_builder_macro" -version = "0.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" -dependencies = [ - "derive_builder_core", - "syn 2.0.89", -] - [[package]] name = "derive_more" version = "0.99.18" @@ -1365,27 +1089,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - 
"windows-sys 0.48.0", -] - [[package]] name = "displaydoc" version = "0.2.5" @@ -1441,18 +1144,6 @@ dependencies = [ "spki", ] -[[package]] -name = "educe" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 2.0.89", -] - [[package]] name = "either" version = "1.13.0" @@ -1503,26 +1194,6 @@ dependencies = [ "syn 2.0.89", ] -[[package]] -name = "enum-ordinalize" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" -dependencies = [ - "enum-ordinalize-derive", -] - -[[package]] -name = "enum-ordinalize-derive" -version = "4.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.89", -] - [[package]] name = "envconfig" version = "0.10.0" @@ -1570,12 +1241,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - [[package]] name = "event-listener" version = "5.3.1" @@ -1593,20 +1258,10 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.1", + "event-listener", "pin-project-lite", ] -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", -] - [[package]] name = "fake" version = "2.10.0" @@ -1671,305 +1326,6 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "fluvio" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd857d19ddae215c21fc7e28b608872637a2b40fb36c5801574d32932ccbf286" -dependencies = [ - "anyhow", - "async-channel 1.9.0", - "async-lock", - "async-trait", - "cfg-if", - "chrono", - "derive_builder", - "dirs", - "event-listener 5.3.1", - "fluvio-compression", - "fluvio-future", - "fluvio-protocol", - "fluvio-sc-schema", - "fluvio-smartmodule", - "fluvio-socket", - "fluvio-spu-schema", - "fluvio-stream-dispatcher", - "fluvio-types", - "futures-util", - "once_cell", - "pin-project", - "semver 1.0.23", - "serde", - "siphasher", - "thiserror 1.0.69", - "tokio", - "toml", - "tracing", -] - -[[package]] -name = "fluvio-compression" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928267ce67c1f2136c01193d90de8d9fd02e5749a4de5317ef86b290c92fb5e3" -dependencies = [ - "bytes", - "flate2", - "fluvio-types", - "lz4_flex", - "serde", - "snap", - "thiserror 1.0.69", - "zstd", -] - -[[package]] -name = "fluvio-controlplane-metadata" -version = "0.29.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aff317349d026baa29745a4dd7c3fc22e5b8652646bc3640150d6cee9abfda6" -dependencies = [ - "anyhow", - "base64 0.22.1", - "bytes", - "bytesize", - "derive_builder", - "flate2", - "fluvio-protocol", - "fluvio-stream-model", - "fluvio-types", - "flv-util", - "humantime-serde", - "lenient_semver", - "semver 1.0.23", - 
"serde", - "serde_yaml", - "thiserror 1.0.69", - "toml", - "tracing", -] - -[[package]] -name = "fluvio-future" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a28090046453db33a8bace0e1f71350b9878cd7fb576e48592ae8284bc83c7e" -dependencies = [ - "anyhow", - "async-io", - "async-net", - "async-std", - "async-trait", - "cfg-if", - "fluvio-wasm-timer", - "futures-lite", - "futures-util", - "openssl", - "openssl-sys", - "pin-project", - "socket2", - "thiserror 1.0.69", - "tracing", - "ws_stream_wasm", -] - -[[package]] -name = "fluvio-protocol" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0d018de11a926dded54fa6ebbb0495cb15516f715392bebbcda76977fe12c1" -dependencies = [ - "bytes", - "cfg-if", - "content_inspector", - "crc32c", - "eyre", - "fluvio-compression", - "fluvio-future", - "fluvio-protocol-derive", - "fluvio-types", - "flv-util", - "once_cell", - "semver 1.0.23", - "thiserror 1.0.69", - "tokio-util", - "tracing", -] - -[[package]] -name = "fluvio-protocol-derive" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13a9820c3b6b95ee62deaa0b724f783bfb1a54f2ce00331274f1eec879f14ca0" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "tracing", -] - -[[package]] -name = "fluvio-sc-schema" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507c721d21b8d6b2be4c7f9016803df9f3c69e6f83498f64fc12b1b565d93f9a" -dependencies = [ - "anyhow", - "fluvio-controlplane-metadata", - "fluvio-protocol", - "fluvio-socket", - "fluvio-stream-model", - "paste", - "static_assertions", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "fluvio-smartmodule" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d45e4940843a5674c862664d94e8da0d77c47c4a1794addbb8bf797f752534" -dependencies = [ - "eyre", - "fluvio-protocol", - "fluvio-smartmodule-derive", - "thiserror 1.0.69", - "tracing", -] - -[[package]] -name = "fluvio-smartmodule-derive" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09b6d1bd7b2018e23ef00a9e78f5bf25dd14dc4bcab4d2f5047abfc7cb6fc07" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.89", -] - -[[package]] -name = "fluvio-socket" -version = "0.14.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2eb33c1d1431ce997ab1ac742ab58a7fc835685a81ffa06f42ca6fd3b980fa" -dependencies = [ - "async-channel 1.9.0", - "async-lock", - "async-trait", - "built", - "bytes", - "cfg-if", - "event-listener 5.3.1", - "fluvio-future", - "fluvio-protocol", - "futures-util", - "nix", - "once_cell", - "pin-project", - "semver 1.0.23", - "thiserror 1.0.69", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "fluvio-spu-schema" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d0e9e63e7982553d32c0e4a0b101c3d168a5c88b8d1f44c24ed16816443560" -dependencies = [ - "bytes", - "derive_builder", - "educe", - "flate2", - "fluvio-future", - "fluvio-protocol", - "fluvio-smartmodule", - "fluvio-types", - "serde", - "static_assertions", - "tracing", -] - -[[package]] -name = "fluvio-stream-dispatcher" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abebf845d315108e3f95d210d026456533edc52ef726903f12be102004541ab" -dependencies = [ - "anyhow", - "async-channel 1.9.0", - 
"async-lock", - "async-trait", - "cfg-if", - "fluvio-future", - "fluvio-stream-model", - "fluvio-types", - "futures-util", - "once_cell", - "parking_lot 0.12.3", - "serde", - "serde_yaml", - "tempfile", - "tokio", - "tracing", -] - -[[package]] -name = "fluvio-stream-model" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd883354fee545863d0983ef7bd74ad6b2d6aa58e74af3eafc6490939b8a16aa" -dependencies = [ - "async-lock", - "event-listener 5.3.1", - "k8-types", - "once_cell", - "serde", - "tracing", -] - -[[package]] -name = "fluvio-types" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5892e3a0f8f4962386137ab3a39cc82568d2c662aed8e55deb13a9c0f1a06b" -dependencies = [ - "event-listener 5.3.1", - "serde", - "thiserror 1.0.69", - "toml", - "tracing", -] - -[[package]] -name = "fluvio-wasm-timer" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b768c170dc045fa587a8f948c91f9bcfb87f774930477c6215addf54317f137f" -dependencies = [ - "futures", - "js-sys", - "parking_lot 0.11.2", - "pin-utils", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "flv-util" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de89447c8b4aecfa4c0614d1a7be1c6ab4a0266b59bb2713fd746901f28d124e" -dependencies = [ - "log", - "tracing", -] - [[package]] name = "fnv" version = "1.0.7" @@ -2075,19 +1431,6 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" -[[package]] -name = "futures-lite" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "parking", - "pin-project-lite", -] - [[package]] name = "futures-macro" version = "0.3.31" @@ -2175,18 +1518,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "gloo-timers" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "google-cloud-auth" version = "0.17.2" @@ -2446,12 +1777,6 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - [[package]] name = "hex" version = "0.4.3" @@ -2622,22 +1947,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "humantime-serde" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" -dependencies = [ - "humantime", - "serde", -] - [[package]] name = "hyper" version = "0.14.31" @@ -2954,12 +2263,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -3198,43 +2501,7 @@ dependencies = [ "tracing", "tracing-bunyan-formatter", "tracing-log 0.2.0", - "tracing-opentelemetry", - "tracing-subscriber", - "uuid", -] - -[[package]] -name = "integrationos-emit" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "axum", - "axum-prometheus 0.7.0", - "chrono", - "dotenvy", - "envconfig", - "fluvio", - "futures", - "futures-util", - "http 1.1.0", - "http-serde-ext-ios", - "integrationos-domain", - "mockito", - "mongodb", - "num_cpus", - "reqwest", - "reqwest-middleware", - "reqwest-retry", - "reqwest-tracing", - "serde", - "serde_json", - "strum", - "testcontainers-modules", - "tokio", - "tokio-graceful-shutdown", - "tower-http", - "tracing", + "tracing-opentelemetry", "tracing-subscriber", "uuid", ] @@ -3280,7 +2547,7 @@ dependencies = [ "anyhow", "async-trait", "axum", - "axum-prometheus 0.6.1", + "axum-prometheus", "dotenvy", "envconfig", "http 1.1.0", @@ -3394,15 +2661,6 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" -[[package]] -name = "jobserver" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" -dependencies = [ - "libc", -] - [[package]] name = "js-sandbox-ios" version = "0.2.0" @@ -3528,16 +2786,6 @@ dependencies = [ "signature", ] -[[package]] -name = "k8-types" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b1996eb39fe3991c64d32262261d3a37a8a43fcf7bc3a5456ab399f02a50114" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "k8s-openapi" version = "0.23.0" @@ -3670,15 +2918,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "lazy_static" version = "1.5.0" @@ -3688,35 +2927,6 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "lenient_semver" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de8de3f4f3754c280ce1c8c42ed8dd26a9c8385c2e5ad4ec5a77e774cea9c1ec" -dependencies = [ - "lenient_semver_parser", - "semver 1.0.23", -] - -[[package]] -name = "lenient_semver_parser" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f650c1d024ddc26b4bb79c3076b30030f2cf2b18292af698c81f7337a64d7d6" -dependencies = [ - "lenient_semver_version_builder", - "semver 1.0.23", -] - -[[package]] -name = "lenient_semver_version_builder" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9049f8ff49f75b946f95557148e70230499c8a642bf2d6528246afc7d0282d17" -dependencies = [ - "semver 1.0.23", -] - 
[[package]] name = "libc" version = "0.2.165" @@ -3739,16 +2949,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.6.0", - "libc", -] - [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3793,9 +2993,6 @@ name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" -dependencies = [ - "value-bag", -] [[package]] name = "lru-cache" @@ -3806,15 +3003,6 @@ dependencies = [ "linked-hash-map", ] -[[package]] -name = "lz4_flex" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" -dependencies = [ - "twox-hash", -] - [[package]] name = "mach2" version = "0.4.2" @@ -3897,16 +3085,6 @@ dependencies = [ "portable-atomic", ] -[[package]] -name = "metrics" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" -dependencies = [ - "ahash 0.8.11", - "portable-atomic", -] - [[package]] name = "metrics-exporter-prometheus" version = "0.12.2" @@ -3942,26 +3120,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "metrics-exporter-prometheus" -version = "0.15.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" -dependencies = [ - "base64 0.22.1", - "http-body-util", - "hyper 1.5.1", - "hyper-util", - "indexmap 2.6.0", - "ipnet", - "metrics 0.23.0", - "metrics-util 0.17.0", - "quanta 0.12.3", - "thiserror 1.0.69", - "tokio", - "tracing", -] - [[package]] name = "metrics-macros" version = "0.7.1" @@ -4003,44 +3161,6 @@ dependencies = [ "sketches-ddsketch", ] -[[package]] -name = "metrics-util" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", - "hashbrown 0.14.5", - "metrics 0.23.0", - "num_cpus", - "quanta 0.12.3", - "sketches-ddsketch", -] - -[[package]] -name = "miette" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4edc8853320c2a0dab800fbda86253c8938f6ea88510dc92c5f1ed20e794afc1" -dependencies = [ - "cfg-if", - "miette-derive", - "thiserror 1.0.69", - "unicode-width", -] - -[[package]] -name = "miette-derive" -version = "7.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.89", -] - [[package]] name = "mime" version = "0.3.17" @@ -4087,7 +3207,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", @@ -4128,7 +3248,7 @@ dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "event-listener 5.3.1", + "event-listener", "futures-util", 
"once_cell", "parking_lot 0.12.3", @@ -4268,18 +3388,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "nix" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "cfg_aliases", - "libc", -] - [[package]] name = "nom" version = "7.1.3" @@ -4370,7 +3478,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi", "libc", ] @@ -4438,15 +3546,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-src" -version = "300.4.1+3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.104" @@ -4455,7 +3554,6 @@ checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] @@ -4525,12 +3623,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "ordered-float" version = "2.10.1" @@ -4700,16 +3792,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - [[package]] name = "pin-project" version = "1.1.7" @@ -4742,17 +3824,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" -dependencies = [ - "atomic-waker", - "fastrand", - "futures-io", -] - [[package]] name = "pkcs1" version = "0.7.5" @@ -4780,21 +3851,6 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" -[[package]] -name = "polling" -version = "3.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" -dependencies = [ - "cfg-if", - "concurrent-queue", - "hermit-abi 0.4.0", - "pin-project-lite", - "rustix", - "tracing", - "windows-sys 0.59.0", -] - [[package]] name = "poly1305" version = "0.8.0" @@ -5195,17 +4251,6 @@ dependencies = [ "bitflags 2.6.0", ] -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom", - "libredox", - "thiserror 1.0.69", -] - [[package]] name = "regex" version = "1.11.1" @@ -5809,12 +4854,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "serde" version = "1.0.215" @@ -5888,15 +4927,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_spanned" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -6103,12 +5133,6 @@ dependencies = [ "time", ] -[[package]] -name = "siphasher" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" - [[package]] name = "sketches-ddsketch" version = "0.2.2" @@ -6133,12 +5157,6 @@ dependencies = [ "serde", ] -[[package]] -name = "snap" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" - [[package]] name = "socket2" version = "0.5.7" @@ -6229,7 +5247,7 @@ dependencies = [ "crc", "crossbeam-queue", "either", - "event-listener 5.3.1", + "event-listener", "futures-channel", "futures-core", "futures-intrusive", @@ -6718,23 +5736,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-graceful-shutdown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377f179872759c830d3bf52d219fedf77c9dc44df51baba517b447cfdd963e62" -dependencies = [ - "async-trait", - "atomic", - "bytemuck", - "miette", - "pin-project-lite", - "thiserror 2.0.3", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "tokio-macros" version = "2.4.0" @@ -6814,27 +5815,11 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" -dependencies = [ - "indexmap 2.6.0", - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" -dependencies = [ - "serde", -] [[package]] name = "toml_edit" @@ -6843,8 +5828,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.6.0", - "serde", - "serde_spanned", "toml_datetime", "winnow", ] @@ -7071,16 +6054,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" -[[package]] -name = "twox-hash" -version = "1.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", - "static_assertions", -] - [[package]] name = "typed-builder" version = "0.10.0" @@ -7149,12 +6122,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" -[[package]] -name = "unicode-width" -version = "0.1.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - [[package]] name = "unicode_categories" version = "0.1.1" @@ -7306,12 +6273,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value-bag" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" - [[package]] name = "vcpkg" version = "0.2.15" @@ -7758,25 +6719,6 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper", - "thiserror 1.0.69", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" @@ -7879,31 +6821,3 @@ dependencies = [ "quote", "syn 2.0.89", ] - -[[package]] -name = "zstd" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" -dependencies = [ - "cc", - "pkg-config", -] diff --git a/Cargo.toml b/Cargo.toml index f1a9e57a..06a234be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,7 @@ members = [ "integrationos-gateway", "integrationos-database", "integrationos-unified", - "integrationos-watchdog", - "integrationos-emit" + "integrationos-watchdog" ] [workspace.dependencies] diff --git a/integrationos-api/src/domain/config.rs b/integrationos-api/src/domain/config.rs index a3c8b30f..914e9c1b 100644 --- a/integrationos-api/src/domain/config.rs +++ b/integrationos-api/src/domain/config.rs @@ -59,10 +59,6 @@ pub struct ConnectionsConfig { /// This is the admin secret for the API. Be sure this value is not the one use to generate /// tokens for the users as it gives access to sensitive admin endpoints. 
     pub jwt_secret: String,
-    #[envconfig(from = "EMIT_URL", default = "http://localhost:3001")]
-    pub emit_url: String,
-    #[envconfig(from = "EMITTER_ENABLED", default = "false")]
-    pub emitter_enabled: bool,
     #[envconfig(from = "CONNECTIONS_URL", default = "http://localhost:3005")]
     /// Same as self url, but this may vary in a k8s environment hence it's a separate config
     pub connections_url: String,
@@ -123,7 +119,6 @@ impl Display for ConnectionsConfig {
             "CONNECTION_CACHE_TTL_SECS: {}",
             self.connection_cache_ttl_secs
         )?;
-        writeln!(f, "EMIT_URL: {}", self.emit_url)?;
         writeln!(f, "CONNECTIONS_URL: {}", self.connections_url)?;
         writeln!(
             f,
diff --git a/integrationos-api/src/logic/connection.rs b/integrationos-api/src/logic/connection.rs
index cc6ba4c6..7ee36902 100644
--- a/integrationos-api/src/logic/connection.rs
+++ b/integrationos-api/src/logic/connection.rs
@@ -401,7 +401,6 @@ async fn generate_k8s_specs_and_secret(
             InternalError::serialize_error("Unable to convert address to SocketAddr", None)
         })?,
         environment: state.config.environment,
-        emit_url: state.config.emit_url.clone(),
         connections_url: state.config.connections_url.clone(),
         database_connection_type: connection_config.platform.parse().map_err(|_| {
             InternalError::serialize_error(
@@ -410,7 +409,6 @@ async fn generate_k8s_specs_and_secret(
             )
         })?,
         connection_id: connection_id.to_string(),
-        emitter_enabled: state.config.emitter_enabled,
         jwt_secret: None,
     };
diff --git a/integrationos-api/src/logic/openapi/mod.rs b/integrationos-api/src/logic/openapi/mod.rs
index 2d1adf88..2943fbf0 100644
--- a/integrationos-api/src/logic/openapi/mod.rs
+++ b/integrationos-api/src/logic/openapi/mod.rs
@@ -65,7 +65,7 @@ pub struct CachedSchema {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase", untagged)]
 pub enum OpenApiSchema {
-    OpenAPI(OpenAPI),
+    OpenAPI(Box<OpenAPI>),
     Accepted(String),
     Error(String),
 }
diff --git a/integrationos-database/src/service/init.rs b/integrationos-database/src/service/init.rs
index b790c200..c8dcfbc7 100644
--- a/integrationos-database/src/service/init.rs
+++ b/integrationos-database/src/service/init.rs
@@ -8,20 +8,14 @@ use http::header::AUTHORIZATION;
 use integrationos_domain::{
     database::{DatabaseConnectionType, DatabasePodConfig},
     database_secret::DatabaseConnectionSecret,
-    emitted_events::DatabaseConnectionLost,
-    Claims, Id, InternalError, Secret, Unit,
+    Claims, InternalError, Secret,
 };
 use reqwest::Client;
-use std::{str::FromStr, sync::Arc};
+use std::sync::Arc;
 
 #[async_trait]
 pub trait Initializer {
     async fn init(config: &DatabasePodConfig) -> Result<Server>;
-    async fn kill(
-        config: &DatabasePodConfig,
-        reason: String,
-        client: Option<Client>,
-    ) -> Result<Unit>;
 }
 
 pub struct DatabaseInitializer;
@@ -38,35 +32,6 @@ impl Initializer for DatabaseInitializer {
 
         server
     }
-
-    async fn kill(
-        config: &DatabasePodConfig,
-        reason: String,
-        client: Option<Client>,
-    ) -> Result<Unit> {
-        let emit_url = config.emit_url.clone();
-        let client = client.unwrap_or_default();
-        let connection_id = Id::from_str(&config.connection_id)?;
-        let value = DatabaseConnectionLost {
-            connection_id,
-            reason: Some(reason),
-            schedule_on: None,
-        }
-        .as_event();
-
-        tracing::info!("Emitting event {value:?} to dispose of connection {connection_id}");
-
-        client
-            .post(format!("{}/v1/emit", emit_url))
-            .header("content-type", "application/json")
-            .json(&value)
-            .send()
-            .await?;
-
-        tracing::info!("Event for dispose of connection {connection_id} emitted");
-
-        Ok(())
-    }
 }
 
 async fn start(config: &DatabasePodConfig) -> Result<Server> {
diff --git a/integrationos-database/src/service/mod.rs b/integrationos-database/src/service/mod.rs
index 452cf541..c5d0e6cf 100644
--- a/integrationos-database/src/service/mod.rs
+++ b/integrationos-database/src/service/mod.rs
@@ -1,6 +1,10 @@
-use init::{DatabaseInitializer, Initializer};
-use integrationos_domain::{database::DatabasePodConfig, Unit};
+use http::header::AUTHORIZATION;
+use integrationos_domain::{
+    database::DatabasePodConfig, emitted_events::ConnectionLostReason, ApplicationError, Claims,
+    Id, IntegrationOSError, InternalError, Unit,
+};
 use reqwest::Client;
+use std::str::FromStr;
 
 pub mod init;
 pub mod storage;
@@ -9,10 +13,50 @@ pub async fn on_error_callback(
     e: &anyhow::Error,
     config: &DatabasePodConfig,
     client: Option<Client>,
-) -> Result<Unit, anyhow::Error> {
-    if config.emitter_enabled {
-        DatabaseInitializer::kill(config, e.to_string(), client).await
-    } else {
-        Ok(())
-    }
+) -> Result<Unit, IntegrationOSError> {
+    let base_path = &config.connections_url;
+    let connection_id = Id::from_str(&config.connection_id)?;
+    let jwt_secret = config
+        .jwt_secret
+        .clone()
+        .ok_or(ApplicationError::bad_request(
+            "JWT secret is required for database connection",
+            None,
+        ))?;
+
+    let path = format!("{base_path}/v1/database-connection-lost/{connection_id}");
+
+    let authorization = Claims::from_secret(jwt_secret.as_str())?;
+    let payload = ConnectionLostReason {
+        reason: e.to_string(),
+    };
+    let client = client.unwrap_or_default();
+
+    client
+        .post(path)
+        .json(&payload)
+        .header(AUTHORIZATION, format!("Bearer {authorization}"))
+        .send()
+        .await
+        .inspect(|res| {
+            tracing::info!("Response: {:?}", res);
+        })
+        .map_err(|e| {
+            tracing::error!("Failed to build request for connection id {connection_id}: {e}");
+            InternalError::io_err(
+                &format!("Failed to build request for connection id {connection_id}"),
+                None,
+            )
+        })?
+        .error_for_status()
+        .map_err(|e| {
+            tracing::error!("Failed to execute request for connection id {connection_id}: {e}");
+            ApplicationError::bad_request(
+                &format!("Failed to execute request for connection id {connection_id}"),
+                None,
+            )
+        })
+        .map(|res| tracing::info!("Response: {:?}", res))?;
+
+    Ok(())
 }
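The change above replaces the emitter hop with a direct, authenticated callback: on a fatal error the database pod now POSTs a `ConnectionLostReason` JSON body to the connections API. A minimal sketch of that contract with plain `reqwest`; only the route and the `{ "reason": ... }` payload shape come from the patch, while the helper name, parameters, and values are hypothetical:

```rust
use reqwest::Client;
use serde_json::json;

// Hypothetical helper mirroring what `on_error_callback` now does.
// In the patch the bearer token is a JWT minted via `Claims::from_secret`.
async fn report_connection_lost(
    connections_url: &str, // the pod's CONNECTIONS_URL setting
    connection_id: &str,
    bearer_token: &str,
    reason: &str,
) -> Result<(), reqwest::Error> {
    let url = format!("{connections_url}/v1/database-connection-lost/{connection_id}");
    Client::new()
        .post(url)
        .bearer_auth(bearer_token)
        .json(&json!({ "reason": reason })) // same shape as `ConnectionLostReason`
        .send()
        .await?
        .error_for_status()?; // non-2xx is treated as a failure, as in the patch
    Ok(())
}
```

Compared with the old `kill` flow, this trades the intermediate emit service (and its Fluvio and MongoDB footprint) for a single signed HTTP call to the connections API.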
diff --git a/integrationos-database/tests/context.rs b/integrationos-database/tests/context.rs
index cf373712..41532d9d 100644
--- a/integrationos-database/tests/context.rs
+++ b/integrationos-database/tests/context.rs
@@ -63,7 +63,6 @@ impl TestServer {
                 Id::now(IdPrefix::Connection).to_string(),
             ),
             ("JWT_SECRET".to_string(), "secret".to_string()),
-            ("EMITTER_ENABLED".to_string(), "true".to_string()),
         ])
         .into_iter()
         .chain(r#override.into_iter())
diff --git a/integrationos-database/tests/http/connection.rs b/integrationos-database/tests/http/connection.rs
index 2c0ede6c..b3706509 100644
--- a/integrationos-database/tests/http/connection.rs
+++ b/integrationos-database/tests/http/connection.rs
@@ -58,7 +58,6 @@ async fn test_execute_probe() -> Result<Unit, IntegrationOSError> {
 
     let server = TestServer::new(HashMap::from([
         ("CONNECTION_ID".to_string(), connection_id.to_string()),
-        ("EMIT_URL".to_string(), mock_uri.clone()),
         ("CONNECTIONS_URL".to_string(), mock_uri),
     ]))
     .await?;
@@ -125,7 +124,6 @@ async fn test_execute_raw() -> Result<Unit, IntegrationOSError> {
 
     let server = TestServer::new(HashMap::from([
         ("CONNECTION_ID".to_string(), connection_id.to_string()),
-        ("EMIT_URL".to_string(), mock_uri.clone()),
         ("CONNECTIONS_URL".to_string(), mock_uri),
     ]))
     .await?;
diff --git a/integrationos-database/tests/http/signal.rs b/integrationos-database/tests/http/signal.rs
index b8f471e1..8adc4346 100644
--- a/integrationos-database/tests/http/signal.rs
+++ b/integrationos-database/tests/http/signal.rs
@@ -1,7 +1,7 @@
 use crate::context::TestServer;
 use http::header::{ACCEPT, CONTENT_LENGTH, CONTENT_TYPE, HOST};
 use integrationos_domain::{
-    emitted_events::DatabaseConnectionLost, prefix::IdPrefix, Id, IntegrationOSError, Unit,
+    emitted_events::ConnectionLostReason, prefix::IdPrefix, Id, IntegrationOSError, Unit,
 };
 use mockito::Server as MockServer;
 use std::collections::HashMap;
@@ -20,19 +20,15 @@ async fn test_kill_signal() -> Result<Unit, IntegrationOSError> {
         .create_async()
         .await;
 
-    let path = "/v1/emit";
-    let body = DatabaseConnectionLost {
-        connection_id,
-        reason: Some(
-            "Deserialization error: Failed to deserialize secret: error decoding response body"
-                .to_string(),
-        ),
-        schedule_on: None,
-    }
-    .as_event();
-
-    let emit_req = mock_server
-        .mock("POST", path)
+    let path = format!("/v1/database-connection-lost/{connection_id}");
+    let body = ConnectionLostReason {
+        reason: "Deserialization error: Failed to deserialize secret: error decoding response body"
+            .to_string(),
+    };
+    let body = serde_json::to_string(&body).expect("Failed to serialize body");
+
+    let callback_req = mock_server
+        .mock("POST", path.as_str())
         .match_header(CONTENT_TYPE, "application/json")
         .match_header(ACCEPT, "*/*")
         .match_header(HOST, mock_server.host_with_port().as_str())
@@ -45,13 +41,12 @@ async fn test_kill_signal() -> Result<Unit, IntegrationOSError> {
     let _ = TestServer::new(HashMap::from([
         ("CONNECTION_ID".to_string(), connection_id.to_string()),
         ("POSTGRES_PASSWORD".to_string(), "wrongpass".to_string()),
-        ("EMIT_URL".to_string(), mock_uri.clone()),
         ("CONNECTIONS_URL".to_string(), mock_uri),
     ]))
     .await;
 
     secret_req.expect(1).assert_async().await;
-    emit_req.expect(1).assert_async().await;
+    callback_req.expect(1).assert_async().await;
 
     Ok(())
 }
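The updated `test_kill_signal` pins this contract down with mockito's async server. A self-contained sketch of the same pattern, assuming tokio with its test macros, mockito 1.x, and reqwest; the path and body are example values, not ones from the test:

```rust
use mockito::Server;

#[tokio::test]
async fn connection_lost_callback_is_posted_once() {
    // Stand up a local mock and expect exactly one POST to the callback route.
    let mut server = Server::new_async().await;
    let mock = server
        .mock("POST", "/v1/database-connection-lost/example-connection-id")
        .match_header("content-type", "application/json")
        .with_status(200)
        .expect(1)
        .create_async()
        .await;

    // In the real test this request is sent by the server under test;
    // here we issue it directly to show the assertion mechanics.
    reqwest::Client::new()
        .post(format!(
            "{}/v1/database-connection-lost/example-connection-id",
            server.url()
        ))
        .json(&serde_json::json!({ "reason": "example failure" }))
        .send()
        .await
        .expect("request failed");

    // Fails the test if the route was not hit exactly once.
    mock.assert_async().await;
}
```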
a/integrationos-domain/src/domain/configuration/database.rs b/integrationos-domain/src/domain/configuration/database.rs index 008bb3f6..468d36d2 100644 --- a/integrationos-domain/src/domain/configuration/database.rs +++ b/integrationos-domain/src/domain/configuration/database.rs @@ -78,10 +78,6 @@ pub struct DatabasePodConfig { pub address: SocketAddr, #[envconfig(from = "ENVIRONMENT", default = "development")] pub environment: Environment, - #[envconfig(from = "EMIT_URL", default = "http://localhost:3001")] - pub emit_url: String, - #[envconfig(from = "EMITTER_ENABLED", default = "false")] - pub emitter_enabled: bool, #[envconfig(from = "CONNECTIONS_URL", default = "http://localhost:3005")] pub connections_url: String, #[envconfig(from = "DATABASE_CONNECTION_TYPE", default = "postgresql")] @@ -104,16 +100,11 @@ impl DatabasePodConfig { "INTERNAL_SERVER_ADDRESS".to_string(), self.address.to_string(), ); - map.insert( - "EMITTER_ENABLED".to_string(), - self.emitter_enabled.to_string(), - ); map.insert("ENVIRONMENT".to_string(), self.environment.to_string()); map.insert( "DATABASE_CONNECTION_TYPE".to_string(), self.database_connection_type.as_ref().into(), ); - map.insert("EMIT_URL".to_string(), self.emit_url.clone()); map.insert("CONNECTION_ID".to_string(), self.connection_id.clone()); map.insert("CONNECTIONS_URL".to_string(), self.connections_url.clone()); map.insert( @@ -129,9 +120,7 @@ impl Display for DatabasePodConfig { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { writeln!(f, "WORKER_THREADS: {:?}", self.worker_threads)?; writeln!(f, "INTERNAL_SERVER_ADDRESS: {}", self.address)?; - writeln!(f, "EMIT_URL: {}", self.emit_url)?; writeln!(f, "ENVIRONMENT: {}", self.environment)?; - writeln!(f, "EMITTER_ENABLED: {}", self.emitter_enabled)?; writeln!(f, "JWT_SECRET: ***")?; writeln!( f, diff --git a/integrationos-domain/src/domain/error/mod.rs b/integrationos-domain/src/domain/error/mod.rs index 5d30a57d..90b24aae 100644 --- a/integrationos-domain/src/domain/error/mod.rs +++ b/integrationos-domain/src/domain/error/mod.rs @@ -18,7 +18,7 @@ pub trait ErrorMeta { fn code(&self) -> ErrorCode; fn key(&self) -> ErrorKey; fn message(&self) -> ErrorMessage; - fn meta(&self) -> Option; + fn meta(&self) -> Option>; } #[derive(Debug, Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Serialize)] @@ -86,79 +86,79 @@ pub enum InternalError { UnknownError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("A unique field violation occurred: {}", .message)] UniqueFieldViolation { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("A timeout occurred: {}", .message)] Timeout { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("A connection error occurred: {}", .message)] ConnectionError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Entity not found: {}", .message)] KeyNotFound { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Argument provided is invalid: {}", .message)] InvalidArgument { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("An error while performing an IO operation: {}", .message)] IOErr { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Encription error: {}", .message)] EncryptionError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Decryption error: {}", .message)] DecryptionError { message: String, subtype: Option, - meta: Option, + meta: 
Option>, }, #[error("Configuration error: {}", .message)] ConfigurationError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Serialization error: {}", .message)] SerializeError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Deserialization error: {}", .message)] DeserializeError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("An error occurred running the javascript function: {}", .message)] ScriptError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, } @@ -280,7 +280,7 @@ impl InternalError { }) } - fn set_meta(self, metadata: Value) -> Self { + fn set_meta(self, metadata: Box) -> Self { match self { InternalError::UnknownError { message, subtype, .. @@ -461,7 +461,7 @@ impl ErrorMeta for InternalError { } // TODO: Implement this - fn meta(&self) -> Option { + fn meta(&self) -> Option> { match self { InternalError::UnknownError { meta, .. } => meta.clone(), InternalError::UniqueFieldViolation { meta, .. } => meta.clone(), @@ -502,73 +502,73 @@ pub enum ApplicationError { BadRequest { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Conflict: {}", .message)] Conflict { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Forbidden: {}", .message)] Forbidden { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Internal Server Error: {}", .message)] InternalServerError { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Method Not Allowed: {}", .message)] MethodNotAllowed { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Not Found: {}", .message)] NotFound { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Not Implemented: {}", .message)] NotImplemented { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Precondition Failed: {}", .message)] FailedDependency { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Service Unavailable: {}", .message)] ServiceUnavailable { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Too Many Requests: {}", .message)] TooManyRequests { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Unauthorized: {}", .message)] Unauthorized { message: String, subtype: Option, - meta: Option, + meta: Option>, }, #[error("Unprocessable Entity: {}", .message)] UnprocessableEntity { message: String, subtype: Option, - meta: Option, + meta: Option>, }, } @@ -682,7 +682,7 @@ impl ApplicationError { }) } - fn set_meta(self, meta: Value) -> Self { + fn set_meta(self, meta: Box) -> Self { match self { ApplicationError::BadRequest { message, subtype, .. @@ -854,7 +854,7 @@ impl ErrorMeta for ApplicationError { } } - fn meta(&self) -> Option { + fn meta(&self) -> Option> { match self { ApplicationError::BadRequest { meta, .. } => meta.clone(), ApplicationError::Conflict { meta, .. } => meta.clone(), @@ -1139,13 +1139,6 @@ impl IntegrationOSError { StatusCode::from(self).as_u16() } - pub fn is_unique_error(&self) -> bool { - match self { - IntegrationOSError::Internal(InternalError::UniqueFieldViolation { .. 
@@ -461,7 +461,7 @@ impl ErrorMeta for InternalError { } // TODO: Implement this - fn meta(&self) -> Option<Value> { + fn meta(&self) -> Option<Box<Value>> { match self { InternalError::UnknownError { meta, .. } => meta.clone(), InternalError::UniqueFieldViolation { meta, .. } => meta.clone(), @@ -502,73 +502,73 @@ pub enum ApplicationError { BadRequest { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Conflict: {}", .message)] Conflict { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Forbidden: {}", .message)] Forbidden { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Internal Server Error: {}", .message)] InternalServerError { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Method Not Allowed: {}", .message)] MethodNotAllowed { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Not Found: {}", .message)] NotFound { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Not Implemented: {}", .message)] NotImplemented { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Precondition Failed: {}", .message)] FailedDependency { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Service Unavailable: {}", .message)] ServiceUnavailable { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Too Many Requests: {}", .message)] TooManyRequests { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Unauthorized: {}", .message)] Unauthorized { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, #[error("Unprocessable Entity: {}", .message)] UnprocessableEntity { message: String, subtype: Option<String>, - meta: Option<Value>, + meta: Option<Box<Value>>, }, } @@ -682,7 +682,7 @@ impl ApplicationError { }) } - fn set_meta(self, meta: Value) -> Self { + fn set_meta(self, meta: Box<Value>) -> Self { match self { ApplicationError::BadRequest { message, subtype, .. @@ -854,7 +854,7 @@ impl ErrorMeta for ApplicationError { } } - fn meta(&self) -> Option<Value> { + fn meta(&self) -> Option<Box<Value>> { match self { ApplicationError::BadRequest { meta, .. } => meta.clone(), ApplicationError::Conflict { meta, .. } => meta.clone(), @@ -1139,13 +1139,6 @@ impl IntegrationOSError { StatusCode::from(self).as_u16() } - pub fn is_unique_error(&self) -> bool { - match self { - IntegrationOSError::Internal(InternalError::UniqueFieldViolation { .. }) => true, - _ => false, - } - } - fn internal(internal: InternalError) -> Self { IntegrationOSError::Internal(internal) } @@ -1291,10 +1284,10 @@ impl IntegrationOSError { pub fn set_meta(self, meta: &Value) -> Self { match self { IntegrationOSError::Internal(e) => { - IntegrationOSError::internal(e.set_meta(meta.clone())) + IntegrationOSError::internal(e.set_meta(Box::new(meta.clone()))) } IntegrationOSError::Application(e) => { - IntegrationOSError::application(e.set_meta(meta.clone())) + IntegrationOSError::application(e.set_meta(Box::new(meta.clone()))) } } } @@ -1330,7 +1323,7 @@ impl ErrorMeta for IntegrationOSError { } } - fn meta(&self) -> Option<Value> { + fn meta(&self) -> Option<Box<Value>> { match self { IntegrationOSError::Internal(e) => e.meta(), IntegrationOSError::Application(e) => e.meta(), diff --git a/integrationos-domain/src/domain/event/emitted_events.rs b/integrationos-domain/src/domain/event/emitted_events.rs index c8efdd3b..17620f2c 100644 --- a/integrationos-domain/src/domain/event/emitted_events.rs +++ b/integrationos-domain/src/domain/event/emitted_events.rs @@ -1,25 +1,4 @@ -use crate::Id; use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; - -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct DatabaseConnectionLost { - pub connection_id: Id, - pub reason: Option<String>, - pub schedule_on: Option<i64>, -} - -impl DatabaseConnectionLost { - pub fn as_event(&self) -> Value { - json!({ - "type": "DatabaseConnectionLost", - "connectionId": self.connection_id, - "reason": self.reason.clone(), - "scheduleOn": self.schedule_on, - }) - } -} #[derive(Debug, Deserialize, Serialize)] pub struct ConnectionLostReason {
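`ConnectionLostReason` survives the removal because the API's event-callback endpoint still receives it. A hypothetical round-trip, assuming its single field is `reason: String` as the former emitter constructed it:

```rust
// Illustrative redefinition; the real type lives in integrationos_domain.
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize)]
pub struct ConnectionLostReason {
    pub reason: String,
}

fn main() -> Result<(), serde_json::Error> {
    let payload = ConnectionLostReason { reason: "Unable to connect to database".into() };
    let json = serde_json::to_string(&payload)?;
    assert_eq!(json, r#"{"reason":"Unable to connect to database"}"#);
    let _back: ConnectionLostReason = serde_json::from_str(&json)?;
    Ok(())
}
```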
diff --git a/integrationos-domain/src/domain/pipeline/middleware.rs b/integrationos-domain/src/domain/pipeline/middleware.rs index 577b9cf9..71741a78 100644 --- a/integrationos-domain/src/domain/pipeline/middleware.rs +++ b/integrationos-domain/src/domain/pipeline/middleware.rs @@ -7,7 +7,7 @@ use super::extractor::HttpExtractor; #[serde(rename_all = "camelCase", tag = "_type")] pub enum Middleware { #[serde(rename = "extractor::http")] - HttpExtractor(HttpExtractor), + HttpExtractor(Box<HttpExtractor>), Transformer { language: String, code: String, diff --git a/integrationos-emit/Cargo.toml b/integrationos-emit/Cargo.toml deleted file mode 100644 index e87a01b1..00000000 --- a/integrationos-emit/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "integrationos-emit" -version = "0.1.0" -edition = "2021" - -[dependencies] -anyhow.workspace = true -async-trait.workspace = true -axum.workspace = true -axum-prometheus = "0.7" -chrono.workspace = true -dotenvy.workspace = true -envconfig.workspace = true -fluvio = "0.23.4" -futures-util.workspace = true -futures.workspace = true -http-serde-ext-ios.workspace = true -http.workspace = true -integrationos-domain = { path = "../integrationos-domain" } -mongodb.workspace = true -num_cpus.workspace = true -reqwest = { workspace = true, features = ["rustls-tls"] } -reqwest-middleware = { version = "0.4", features = ["json"] } -reqwest-retry = "0.7" -reqwest-tracing = "0.5.4" -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true -strum.workspace = true -tokio = { workspace = true, features = ["full"] } -tokio-graceful-shutdown = "0.15.2" -tower-http.workspace = true -tracing.workspace = true - -[dev-dependencies] -mockito.workspace = true -testcontainers-modules = { workspace = true, features = ["mongo"] } -tracing-subscriber.workspace = true -uuid = { workspace = true, features = ["v4", "serde"] } diff --git a/integrationos-emit/Dockerfile b/integrationos-emit/Dockerfile deleted file mode 100644 index 5f2ec6c3..00000000 --- a/integrationos-emit/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -# syntax = devthefuture/dockerfile-x - -ARG EXECUTABLE=integrationos-emit -INCLUDE Dockerfile.common - -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/* -WORKDIR /app -COPY --from=builder /app/integrationos-emit/target/release/integrationos-emit /usr/local/bin -ENTRYPOINT /usr/local/bin/integrationos-emit diff --git a/integrationos-emit/README.md b/integrationos-emit/README.md deleted file mode 100644 index 89879be9..00000000 --- a/integrationos-emit/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# IntegrationOS Emit - -## Architecture - -![zenuml](https://github.com/user-attachments/assets/e8ac8908-77af-491c-8489-ebd20f17f06e) - -## Running the Emitter - -This guide assumes that you already have a working MongoDB instance running. - -1. Install [fluvio cli and setup Docker compose](https://www.fluvio.io/docs/fluvio/installation/docker/) -2. Create the topic you want to emit to by running the following command: - -```bash -fluvio topic create <topic-name> -p <partition-count> -``` -3. Create the dead letter queue topic you want to emit to by running the following command: - -```bash -fluvio topic create dlq -p <partition-count> -``` -4. Run the emitter with the following command: - -```bash -$ cargo watch -x run -q | bunyan -``` - -This command will monitor changes in the project and execute the emitter service with Bunyan-formatted logging. - -## Running the Tests - -To run the tests for the emitter, use: - -```bash -cargo nextest run --all-features -``` - -This will execute all tests in the project, ensuring that the emitter works as expected across all features. - diff --git a/integrationos-emit/src/algebra/event.rs b/integrationos-emit/src/algebra/event.rs deleted file mode 100644 index 3f7073d5..00000000 --- a/integrationos-emit/src/algebra/event.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::{domain::event::Event, server::AppState}; -use async_trait::async_trait; -use http::header::AUTHORIZATION; -use integrationos_domain::{ - emitted_events::{ConnectionLostReason, DatabaseConnectionLost}, - ApplicationError, Claims, Id, IntegrationOSError, InternalError, Unit, -}; - -#[async_trait] -pub trait EventExt { - async fn side_effect(&self, ctx: &AppState, entity_id: Id) -> Result<Unit, IntegrationOSError>; -} - -#[async_trait] -impl EventExt for Event { - async fn side_effect(&self, ctx: &AppState, entity_id: Id) -> Result<Unit, IntegrationOSError> { - match self { - Event::DatabaseConnectionLost(DatabaseConnectionLost { - connection_id, - reason, - .. - }) => { - let base_path = &ctx.config.event_callback_url; - let path = format!("{base_path}/database-connection-lost/{connection_id}"); - - let authorization = Claims::from_secret(ctx.config.jwt_secret.as_str())?; - let reason = reason - .clone() - .unwrap_or_else(|| "Unable to connect to database".to_string()); - - let payload = ConnectionLostReason { reason }; - - ctx.http_client - .post(path) - .json(&payload) - .header(AUTHORIZATION, format!("Bearer {authorization}")) - .send() - .await - .inspect(|res| { - tracing::info!("Response: {:?}", res); - }) - .map_err(|e| { - tracing::error!("Failed to build request for entity id {entity_id}: {e}"); - InternalError::io_err( - &format!("Failed to build request for entity id {entity_id}"), - None, - ) - })?
- .error_for_status() - .map_err(|e| { - tracing::error!("Failed to execute request for entity id {entity_id}: {e}"); - ApplicationError::bad_request( - &format!("Failed to execute request for entity id {entity_id}"), - None, - ) - }) - .map(|res| tracing::info!("Response: {:?}", res)) - } - } - } -} diff --git a/integrationos-emit/src/algebra/metrics.rs b/integrationos-emit/src/algebra/metrics.rs deleted file mode 100644 index 7056346b..00000000 --- a/integrationos-emit/src/algebra/metrics.rs +++ /dev/null @@ -1,103 +0,0 @@ -use axum_prometheus::metrics::{counter, histogram, Counter, Histogram}; -use axum_prometheus::{ - metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}, - utils::SECONDS_DURATION_BUCKETS, - GenericMetricLayer, Handle, PrometheusMetricLayerBuilder, AXUM_HTTP_REQUESTS_DURATION_SECONDS, -}; -use integrationos_domain::Unit; -use std::time::Duration; - -pub const EVENT_DURATION_KEY: &str = "event_duration_seconds"; -pub const EVENT_COUNT_KEY: &str = "event_count"; -pub const EVENT_ERRORS_KEY: &str = "event_errors"; -pub const EVENT_SUCCESS_KEY: &str = "event_success"; - -pub const DLQ_DURATION_KEY: &str = "dlq_duration_seconds"; -pub const DLQ_COUNT_KEY: &str = "dlq_count"; -pub const DLQ_ERRORS_KEY: &str = "dlq_errors"; -pub const DLQ_SUCCESS_KEY: &str = "dlq_success"; - -pub type MetricHandle = ( - GenericMetricLayer<'static, PrometheusHandle, Handle>, - PrometheusHandle, -); - -pub trait MetricExt { - fn succeeded(&self, value: u64) -> Unit; - fn errored(&self, value: u64) -> Unit; - fn duration(&self, value: Duration) -> Unit; -} - -pub struct MetricsRegistry { - pub event_count: Counter, - pub event_errors: Counter, - pub event_success: Counter, - pub event_duration: Histogram, -} - -impl MetricsRegistry { - pub fn noop() -> Self { - Self { - event_count: Counter::noop(), - event_errors: Counter::noop(), - event_success: Counter::noop(), - event_duration: Histogram::noop(), - } - } - - pub fn handle() -> MetricHandle { - PrometheusMetricLayerBuilder::new() - .with_metrics_from_fn(|| { - PrometheusBuilder::new() - .set_buckets_for_metric( - Matcher::Full(AXUM_HTTP_REQUESTS_DURATION_SECONDS.to_string()), - SECONDS_DURATION_BUCKETS, - ) - .expect("Unable to install request matcher") - .set_buckets_for_metric( - Matcher::Full(EVENT_DURATION_KEY.to_string()), - SECONDS_DURATION_BUCKETS, - ) - .expect("Unable to install event recorder matcher") - .set_buckets_for_metric( - Matcher::Full(DLQ_DURATION_KEY.to_string()), - SECONDS_DURATION_BUCKETS, - ) - .expect("Unable to install dlq recorder matcher") - .install_recorder() - .expect("Unable to setup metrics") - }) - .with_ignore_pattern("/metrics") - .build_pair() - } -} - -impl Default for MetricsRegistry { - fn default() -> Self { - Self { - event_count: counter!(EVENT_COUNT_KEY, "events" => "count"), - event_errors: counter!(EVENT_ERRORS_KEY, "events" => "errors"), - event_success: counter!(EVENT_SUCCESS_KEY, "events" => "success"), - event_duration: histogram!(EVENT_DURATION_KEY, "events" => "duration"), - // dlq_count: counter!(DLQ_COUNT_KEY, "dlq" => "count"), - // dlq_errors: counter!(DLQ_ERRORS_KEY, "dlq" => "errors"), - // dlq_success: counter!(DLQ_SUCCESS_KEY, "dlq" => "success"), - } - } -} - -impl MetricExt for MetricsRegistry { - fn succeeded(&self, value: u64) -> Unit { - self.event_success.increment(value); - self.event_count.increment(value); - } - - fn errored(&self, value: u64) -> Unit { - self.event_errors.increment(value); - self.event_count.increment(value); - } - - fn 
duration(&self, value: Duration) -> Unit { - self.event_duration.record(value); - } -} diff --git a/integrationos-emit/src/algebra/mod.rs b/integrationos-emit/src/algebra/mod.rs deleted file mode 100644 index 43c955ac..00000000 --- a/integrationos-emit/src/algebra/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod event; -pub mod metrics; diff --git a/integrationos-emit/src/domain/config.rs b/integrationos-emit/src/domain/config.rs deleted file mode 100644 index 11194fdd..00000000 --- a/integrationos-emit/src/domain/config.rs +++ /dev/null @@ -1,266 +0,0 @@ -use crate::stream::EventStreamProvider; -use envconfig::Envconfig; -use fluvio::dataplane::types::PartitionId; -use integrationos_domain::{ - cache::CacheConfig, database::DatabaseConfig, environment::Environment, IntegrationOSError, - InternalError, -}; -use std::{ - fmt::{Display, Formatter}, - net::SocketAddr, - str::FromStr, -}; - -#[derive(Envconfig, Clone)] // Intentionally no Debug so secret is not printed -pub struct EmitterConfig { - #[envconfig(from = "API_VERSION", default = "v1")] - pub api_version: String, - #[envconfig(from = "WORKER_THREADS")] - pub worker_threads: Option<usize>, - #[envconfig(from = "INTERNAL_SERVER_ADDRESS", default = "0.0.0.0:3001")] - pub address: SocketAddr, - #[envconfig(from = "METRICS_SERVER_ADDRESS", default = "0.0.0.0:9004")] - pub metrics_address: SocketAddr, - #[envconfig(from = "CACHE_SIZE", default = "10000")] - pub cache_size: u64, - #[envconfig(from = "ENVIRONMENT", default = "development")] - pub environment: Environment, - #[envconfig(from = "HTTP_CLIENT_TIMEOUT_SECS", default = "30")] - pub http_client_timeout_secs: u64, - #[envconfig(from = "HTTP_CLIENT_MAX_RETRIES", default = "3")] - pub http_client_max_retries: u32, - #[envconfig(from = "EVENT_STREAM_PROVIDER", default = "logger")] - pub event_stream_provider: EventStreamProvider, - #[envconfig(from = "EVENT_PROCESSING_MAX_RETRIES", default = "5")] - pub event_processing_max_retries: u32, - #[envconfig(from = "EVENT_MAX_SPAN_FOR_RETRY_SECS", default = "86400")] - pub event_max_span_for_retry_secs: i64, - #[envconfig(from = "SCHEDULED_MAX_CONCURRENT_TASKS", default = "10")] - pub scheduled_max_concurrent_tasks: usize, - #[envconfig(from = "SCHEDULED_SLEEP_DURATION_IN_MILLIS", default = "1000")] - pub scheduled_sleep_duration_millis: u64, - #[envconfig(from = "SCHEDULED_MAX_CHUNK_SIZE", default = "100")] - pub scheduled_max_chunk_size: usize, - #[envconfig(from = "PUSHER_MAX_CONCURRENT_TASKS", default = "10")] - pub pusher_max_concurrent_tasks: usize, - #[envconfig(from = "PUSHER_SLEEP_DURATION_IN_MILLIS", default = "1000")] - pub pusher_sleep_duration_millis: u64, - #[envconfig(from = "PUSHER_MAX_CHUNK_SIZE", default = "100")] - pub pusher_max_chunk_size: usize, - #[envconfig(from = "SHUTDOWN_TIMEOUT_MILLIS", default = "20000")] - pub shutdown_timeout_millis: u64, - #[envconfig( - from = "JWT_SECRET", - default = "2thZ2UiOnsibmFtZSI6IlN0YXJ0dXBsa3NoamRma3NqZGhma3NqZGhma3NqZG5jhYtggfaP9ubmVjdGlvbnMiOjUwMDAwMCwibW9kdWxlcyI6NSwiZW5kcG9pbnRzIjo3b4e05e2-f050-401f-9822-44f43f71753c" - )] - pub jwt_secret: String, - #[envconfig(from = "STATEFULSET_POD_NAME")] - pub statefulset_pod_name: String, - #[envconfig(from = "PARTITION_COUNT")] - pub partition_count: u32, - #[envconfig( - from = "EVENT_CALLBACK_URL", - default = "http://localhost:3005/v1/event-callbacks" - )] - pub event_callback_url: String, - #[envconfig(nested = true)] - pub fluvio: EventStreamConfig, - #[envconfig(nested = true)] - pub cache: CacheConfig, - #[envconfig(nested = true)] - pub db_config: DatabaseConfig, -}
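The struct above leans entirely on the `envconfig` derive: each field reads one environment variable, with optional defaults. A minimal, self-contained sketch of that pattern (illustrative names, not from this crate):

```rust
// Standalone demo of the envconfig pattern used by EmitterConfig.
use envconfig::Envconfig;

#[derive(Envconfig)]
struct DemoConfig {
    #[envconfig(from = "EVENT_STREAM_PROVIDER", default = "logger")]
    provider: String,
    // No default: init_from_env fails if the variable is unset.
    #[envconfig(from = "PARTITION_COUNT")]
    partition_count: u32,
}

fn main() {
    std::env::set_var("PARTITION_COUNT", "3");
    let cfg = DemoConfig::init_from_env().expect("missing required variables");
    // EVENT_STREAM_PROVIDER falls back to "logger" when unset.
    assert_eq!(cfg.provider, "logger");
    assert_eq!(cfg.partition_count, 3);
}
```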
-impl EmitterConfig { - /// Determines the partition ID that this pod should consume from. - /// - /// This method relies on the pod's name to derive the partition ID. It assumes the following: - /// - /// 1. **Pod Naming Convention**: The pod name must follow the format `topic-partition-id`, - /// such as `example-topic-0` or `example-topic-1`. This is typical for StatefulSet pods. - /// 2. **1:1 Mapping**: Each pod has a one-to-one mapping with a specific partition. - /// 3. **Consistent Partition Counts**: The number of partitions in the main topic and - /// any associated Dead Letter Queue (DLQ) must be the same. This ensures consistent - /// partition-to-pod assignments for both topics. - /// - /// ### Behavior - /// - Extracts the partition ID from the pod name using the last hyphen-delimited segment. - /// - Computes the resulting partition by taking the modulus of the extracted ID with the - /// total number of partitions (`partition_count`). - /// - /// ### Assumptions & Limitations - /// - The method is **fragile** and depends on strict adherence to the pod naming convention. - /// - This approach will break in more complex setups or if pod naming conventions change. - /// - Any modifications to the partitioning or naming logic should revisit this method. - /// - /// ### Error Handling - /// Returns an error in the following cases: - /// - The pod name does not match the expected format. - /// - The partition ID cannot be parsed as a valid integer. - /// - /// ### Related Issue - /// For future improvements and a more robust solution, see: - /// [Fluvio Issue #760](https://github.com/infinyon/fluvio/issues/760) - pub fn partition(&self) -> Result<PartitionId, IntegrationOSError> { - let pod_name = self.statefulset_pod_name.clone(); - let partition_count = self.partition_count; - - if let Some((_, partition_id)) = pod_name.rsplit_once('-') { - let partition_id = PartitionId::from_str(partition_id).ok().ok_or({ - InternalError::configuration_error( - &format!("Could not parse partition from pod name: {}", pod_name), - None, - ) - })?; - Ok(partition_id % partition_count) - } else { - Err(InternalError::configuration_error( - &format!("Could not parse partition from pod name: {}", pod_name), - None, - )) - } - } -}
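A standalone mirror of the pod-name parsing documented above, assuming the same `name-<ordinal>` StatefulSet convention (`PartitionId` is a `u32` in fluvio):

```rust
// Sketch of the partition derivation; error type simplified to String.
fn partition_from_pod_name(pod_name: &str, partition_count: u32) -> Result<u32, String> {
    let (_, ordinal) = pod_name
        .rsplit_once('-')
        .ok_or_else(|| format!("Could not parse partition from pod name: {pod_name}"))?;
    let ordinal: u32 = ordinal
        .parse()
        .map_err(|_| format!("Could not parse partition from pod name: {pod_name}"))?;
    // Wrap around so more pods than partitions still map to a valid partition.
    Ok(ordinal % partition_count)
}

fn main() {
    assert_eq!(partition_from_pod_name("emit-0", 3), Ok(0));
    assert_eq!(partition_from_pod_name("emit-4", 3), Ok(1)); // 4 % 3
    assert!(partition_from_pod_name("emit", 3).is_err());
}
```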
-impl Display for EmitterConfig { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "SERVER_ADDRESS: {}", self.address)?; - writeln!(f, "METRICS_SERVER_ADDRESS: {}", self.metrics_address)?; - writeln!(f, "CACHE_SIZE: {}", self.cache_size)?; - writeln!(f, "SECRET: ****")?; - writeln!(f, "ENVIRONMENT: {}", self.environment)?; - writeln!( - f, - "HTTP_CLIENT_TIMEOUT_SECS: {}", - self.http_client_timeout_secs - )?; - writeln!( - f, - "HTTP_CLIENT_MAX_RETRIES: {}", - self.http_client_max_retries - )?; - writeln!(f, "EVENT_STREAM_PROVIDER: {}", self.event_stream_provider)?; - writeln!( - f, - "EVENT_MAX_SPAN_FOR_RETRY_DAYS: {}", - self.event_max_span_for_retry_secs - )?; - writeln!( - f, - "PUSHER_MAX_CONCURRENT_TASKS: {}", - self.pusher_max_concurrent_tasks - )?; - writeln!( - f, - "PUSHER_SLEEP_DURATION_IN_MILLIS: {}", - self.pusher_sleep_duration_millis - )?; - writeln!(f, "STATEFUL_SET_POD_NAME: {:?}", self.statefulset_pod_name)?; - writeln!(f, "PUSHER_MAX_CHUNK_SIZE: {}", self.pusher_max_chunk_size)?; - writeln!(f, "JWT_SECRET: ****")?; - writeln!(f, "EVENT_CALLBACK_URL: {}", self.event_callback_url)?; - writeln!( - f, - "EVENT_PROCESSING_MAX_RETRIES: {}", - self.event_processing_max_retries - )?; - writeln!(f, "SHUTDOWN_TIMEOUT_SECS: {}", self.shutdown_timeout_millis)?; - writeln!(f, "{}", self.fluvio)?; - writeln!(f, "{}", self.cache)?; - writeln!(f, "{}", self.db_config) - } -} - -#[derive(Envconfig, Clone)] -pub struct EventStreamConfig { - #[envconfig(from = "EVENT_STREAM_HOST", default = "127.0.0.1")] - pub host: String, - #[envconfig(from = "EVENT_STREAM_PORT", default = "9003")] - pub port: u16, - #[envconfig(from = "EVENT_STREAM_PRODUCER_TOPIC")] - pub producer_topic: Option<Topic>, - #[envconfig(from = "EVENT_STREAM_CONSUMER_TOPIC")] - pub consumer_topic: Option<Topic>, - #[envconfig(from = "EVENT_STREAM_DLQ_TOPIC", default = "dlq")] - pub dlq_topic: Topic, - #[envconfig(from = "EVENT_STREAM_PRODUCER_LINGER_TIME_IN_MILLIS", default = "500")] - pub producer_linger_time: u64, - #[envconfig(from = "EVENT_STREAM_PRODUCER_BATCH_SIZE", default = "10000")] - pub producer_batch_size: usize, - #[envconfig( - from = "EVENT_STREAM_CONSUMER_LINGER_TIME_IN_MILLIS", - default = "10000" - )] - pub consumer_linger_time: u64, - #[envconfig(from = "EVENT_STREAM_CONSUMER_BATCH_SIZE", default = "500")] - pub consumer_batch_size: usize, - #[envconfig(from = "EVENT_STREAM_ABSOLUTE_OFFSET")] - pub absolute_offset: Option<i64>, - #[envconfig(from = "EVENT_STREAM_CONSUMER_GROUP")] - pub consumer_group: Option<String>, -} - -impl EventStreamConfig { - pub fn endpoint(&self) -> String { - format!("{}:{}", self.host, self.port) - } -} - -impl Display for EventStreamConfig { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "EVENT_STREAM_HOST: {}", self.host)?; - writeln!(f, "EVENT_STREAM_PORT: {}", self.port)?; - writeln!(f, "EVENT_STREAM_CONSUMER_TOPIC: {:?}", self.consumer_topic)?; - writeln!(f, "EVENT_STREAM_PRODUCER_TOPIC: {:?}", self.producer_topic)?; - writeln!(f, "EVENT_STREAM_DLQ_TOPIC: {:?}", self.dlq_topic)?; - writeln!( - f, - "EVENT_STREAM_PRODUCER_LINGER_TIME_IN_MILLIS: {}", - self.producer_linger_time - )?; - writeln!( - f, - "EVENT_STREAM_PRODUCER_BATCH_SIZE: {}", - self.producer_batch_size - )?; - writeln!( - f, - "EVENT_STREAM_CONSUMER_LINGER_TIME_IN_MILLIS: {}", - self.consumer_linger_time - )?; - writeln!( - f, - "EVENT_STREAM_CONSUMER_BATCH_SIZE: {}", - self.consumer_batch_size - )?; - writeln!( - f, - "EVENT_STREAM_ABSOLUTE_OFFSET: {:?}", - self.absolute_offset - )?; - writeln!(f, "EVENT_STREAM_CONSUMER_GROUP: {:?}", self.consumer_group) - } -} - -#[derive(Debug, Clone)] -pub struct Topic(String); - -impl<'a> From<&'a Topic> for String { - fn from(topic: &'a Topic) -> Self { - topic.0.clone() - } -} - -impl FromStr for Topic { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - Ok(Topic(s.to_string())) - } -} - -impl Display for Topic { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "{}", self.0) - } -} diff --git a/integrationos-emit/src/domain/deduplication.rs b/integrationos-emit/src/domain/deduplication.rs deleted file mode 100644 index ba215aa5..00000000 --- a/integrationos-emit/src/domain/deduplication.rs +++ /dev/null @@ -1,11 +0,0 @@ -use integrationos_domain::{record_metadata::RecordMetadata, Id}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct Deduplication { - #[serde(rename = "_id")] - pub entity_id: Id, - #[serde(flatten)] - pub metadata: RecordMetadata, -} diff --git a/integrationos-emit/src/domain/event.rs b/integrationos-emit/src/domain/event.rs deleted file mode 100644 index 08fce212..00000000 --- a/integrationos-emit/src/domain/event.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::{algebra::event::EventExt, server::AppState}; -use chrono::Utc; -use integrationos_domain::{ - emitted_events::DatabaseConnectionLost, prefix::IdPrefix, record_metadata::RecordMetadata, Id, - IntegrationOSError, Unit, -}; -use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumString}; - -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "PascalCase", tag = "type")] -pub enum Event { - DatabaseConnectionLost(DatabaseConnectionLost), -} - -impl Event { - pub fn as_entity(&self) -> EventEntity { - EventEntity { - entity: self.clone(), - entity_id: Id::now(IdPrefix::PipelineEvent), - outcome: EventStatus::Created, - claimed_by: None, - metadata: RecordMetadata::default(), - } - } - - pub fn scheduled_on(&self) -> Option<i64> { - match self { - Event::DatabaseConnectionLost(DatabaseConnectionLost { schedule_on, .. }) => { - *schedule_on - } - } - } -} - -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct EventEntity { - #[serde(rename = "_id")] - pub entity_id: Id, - pub entity: Event, - pub outcome: EventStatus, - #[serde(skip_serializing_if = "Option::is_none", default)] - // TODO: Index this field - pub claimed_by: Option<u32>, - #[serde(flatten, default)] - pub metadata: RecordMetadata, -} - -impl EventEntity { - pub fn with_status(&self, outcome: EventStatus) -> Self { - let mut metadata = self.metadata.clone(); - metadata.mark_updated("system"); - Self { - entity_id: self.entity_id, - entity: self.entity.clone(), - claimed_by: self.claimed_by, - outcome, - metadata, - } - } - - pub fn with_claimed_by(&self, claimed_by: u32) -> Self { - Self { - claimed_by: Some(claimed_by), - ..self.clone() - } - } - - pub async fn side_effect(&self, ctx: &AppState) -> Result<Unit, IntegrationOSError> { - self.entity.side_effect(ctx, self.entity_id).await - } - - pub fn retries(&self) -> u32 { - self.outcome.retries() - } - - pub fn error(&self) -> Option<String> { - self.outcome.error() - } -} - -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, EnumString, AsRefStr)] -#[serde(rename_all = "kebab-case", tag = "type")] -#[strum(serialize_all = "kebab-case")] -pub enum EventStatus { - Created, - Executed { timestamp: i64 }, - Succeded { retries: u32 }, - Errored { error: String, retries: u32 }, -}
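Because the enum above is internally tagged (`tag = "type"`) with kebab-case renaming, statuses serialize as flat JSON objects. A small sketch of the wire shape (note the tag preserves the `Succeded` spelling of the variant):

```rust
// Illustrative redefinition of two variants to show the serialized form.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case", tag = "type")]
enum EventStatus {
    Created,
    Errored { error: String, retries: u32 },
}

fn main() -> Result<(), serde_json::Error> {
    let status = EventStatus::Errored { error: "timeout".into(), retries: 2 };
    let json = serde_json::to_string(&status)?;
    assert_eq!(json, r#"{"type":"errored","error":"timeout","retries":2}"#);
    assert_eq!(serde_json::to_string(&EventStatus::Created)?, r#"{"type":"created"}"#);
    Ok(())
}
```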
-impl EventStatus { - pub fn succeded(retries: u32) -> Self { - Self::Succeded { retries } - } - - pub fn errored(error: String, retries: u32) -> Self { - Self::Errored { error, retries } - } - - pub fn executed() -> Self { - Self::Executed { - timestamp: Utc::now().timestamp_millis(), - } - } - - fn retries(&self) -> u32 { - match self { - Self::Errored { retries, .. } => *retries, - Self::Succeded { retries, .. } => *retries, - _ => 0, - } - } - - fn error(&self) -> Option<String> { - match self { - Self::Errored { error, .. } => Some(error.clone()), - _ => None, - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct ScheduledEvent { - #[serde(rename = "_id")] - pub id: Id, - pub event: EventEntity, - pub schedule_on: i64, -} diff --git a/integrationos-emit/src/domain/idempotency.rs b/integrationos-emit/src/domain/idempotency.rs deleted file mode 100644 index de6dd402..00000000 --- a/integrationos-emit/src/domain/idempotency.rs +++ /dev/null @@ -1,38 +0,0 @@ -use chrono::{DateTime, Utc}; -use integrationos_domain::{record_metadata::RecordMetadata, Id}; -use serde::{Deserialize, Serialize}; -use std::fmt::Display; - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct IdempotencyKey(Id); - -impl IdempotencyKey { - pub fn new(key: Id) -> Self { - Self(key) - } - - pub fn inner(&self) -> Id { - self.0 - } -} - -impl Display for IdempotencyKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -// TODO: Add a TTL to the key and create index -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Idempotency { - /// We use the _id field to store the idempotency key because it is unique out of - /// the box and we can use it as a conflict generation key - #[serde(rename = "_id")] - pub key: IdempotencyKey, - /// Used for the TTL index, MongoDB uses BSON DateTime - /// hence millisecond (number) can't be used - pub date: DateTime<Utc>, - #[serde(flatten)] - pub metadata: RecordMetadata, -}
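The TODO above was never addressed before removal. For reference, one way it could have been done with the MongoDB Rust driver (a sketch assuming the 2.x driver API; the collection and 24-hour expiry are assumptions):

```rust
// Hypothetical TTL index on the BSON `date` field of the idempotency store.
use mongodb::{bson::doc, options::IndexOptions, Collection, IndexModel};
use std::time::Duration;

async fn create_ttl_index<T: Send + Sync>(
    coll: &Collection<T>,
) -> mongodb::error::Result<()> {
    let index = IndexModel::builder()
        .keys(doc! { "date": 1 })
        .options(
            IndexOptions::builder()
                // MongoDB expires documents once `date` is older than this.
                .expire_after(Duration::from_secs(24 * 60 * 60))
                .build(),
        )
        .build();
    coll.create_index(index, None).await?;
    Ok(())
}
```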
-pub async fn emit( - State(state): State<Arc<AppState>>, - Extension(idempotency_key): Extension<IdempotencyKey>, - Json(event): Json<Event>, -) -> Result<Json<EntityIdResponse>, IntegrationOSError> { - let is_processed = state - .app_stores - .idempotency - .get_one(doc! { - "_id": idempotency_key.inner().to_string() - }) - .await - .map(|idempotency| idempotency.is_some()) - .unwrap_or(false); - - if is_processed { - Err(ApplicationError::conflict( - &format!("Event with key {idempotency_key} already processed"), - None, - )) - } else { - let idempotency = Idempotency { - key: idempotency_key.clone(), - date: Utc::now(), - metadata: RecordMetadata::default(), - }; - - state - .app_stores - .idempotency - .create_one(&idempotency) - .await?; - - match event.scheduled_on() { - None => { - let id = state - .event_stream - .publish(event.as_entity(), EventStreamTopic::Target) - .await?; - - Ok(Json(EntityIdResponse { - entity_id: id, - idempotency_key: idempotency_key.inner(), - })) - } - Some(schedule_on) => { - let scheduled = ScheduledEvent { - id: Id::now(IdPrefix::ScheduledEvent), - event: event.as_entity(), - schedule_on, - }; - - state - .app_stores - .scheduled - .create_one(&scheduled) - .await - .map_err(|e| anyhow::anyhow!("Failed to schedule event: {e}"))?; - - Ok(Json(EntityIdResponse { - entity_id: scheduled.id, - idempotency_key: idempotency_key.inner(), - })) - } - } - } -}
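For orientation, a hypothetical invocation of this (now removed) endpoint; the port matches the `INTERNAL_SERVER_ADDRESS` default above, while the id values are placeholders, not real key formats:

```bash
# The idempotency header is optional; when omitted, the middleware
# generates a fresh key. The JSON matches the PascalCase-tagged Event enum.
curl -X POST http://localhost:3001/v1/emit \
  -H 'content-type: application/json' \
  -H 'x-integrationos-idempotency-key: <idempotency-id>' \
  -d '{
        "type": "DatabaseConnectionLost",
        "connectionId": "<connection-id>",
        "reason": "connection refused",
        "scheduleOn": null
      }'
```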
diff --git a/integrationos-emit/src/logic/mod.rs b/integrationos-emit/src/logic/mod.rs deleted file mode 100644 index b799a185..00000000 --- a/integrationos-emit/src/logic/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod emitter; diff --git a/integrationos-emit/src/main.rs b/integrationos-emit/src/main.rs deleted file mode 100644 index 78b27030..00000000 --- a/integrationos-emit/src/main.rs +++ /dev/null @@ -1,42 +0,0 @@ -use anyhow::Result; -use dotenvy::dotenv; -use envconfig::Envconfig; -use integrationos_domain::{ - telemetry::{get_subscriber, init_subscriber}, - Unit, -}; -use integrationos_emit::{ - algebra::metrics::MetricsRegistry, domain::config::EmitterConfig, server::Server, -}; -use std::{sync::Arc, time::Duration}; -use tokio_graceful_shutdown::{SubsystemHandle, Toplevel}; - -fn main() -> Result<Unit> { - dotenv().ok(); - - let config = EmitterConfig::init_from_env()?; - let shutdown_timeout_millis = config.shutdown_timeout_millis; - - let subscriber = get_subscriber("emitter".into(), "info".into(), std::io::stdout, None); - init_subscriber(subscriber); - - tokio::runtime::Builder::new_multi_thread() - .worker_threads(config.worker_threads.unwrap_or(num_cpus::get())) - .enable_all() - .build()? - .block_on(async move { - Toplevel::new(|subsys: SubsystemHandle| async move { - let metrics = Arc::new(MetricsRegistry::handle()); - - let server = Server::init(config.clone(), &metrics) - .await - .expect("Failed to initialize server"); - - Server::subsystem(server, &config, subsys).await; - }) - .catch_signals() - .handle_shutdown_requests(Duration::from_millis(shutdown_timeout_millis)) - .await - .map_err(Into::into) - }) -} diff --git a/integrationos-emit/src/middleware/idempotency.rs b/integrationos-emit/src/middleware/idempotency.rs deleted file mode 100644 index 1811ffd6..00000000 --- a/integrationos-emit/src/middleware/idempotency.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::domain::idempotency::IdempotencyKey; -use axum::{body::Body, middleware::Next, response::Response}; -use http::Request; -use integrationos_domain::{prefix::IdPrefix, ApplicationError, Id, IntegrationOSError}; - -pub const IDEMPOTENCY_HEADER_STR: &str = "x-integrationos-idempotency-key"; - -pub async fn header_idempotency( - mut req: Request<Body>, - next: Next, -) -> Result<Response, IntegrationOSError> { - if let Some(idempotency_key) = req.headers().get(IDEMPOTENCY_HEADER_STR) { - let idempotency_key = idempotency_key - .to_str() - .map_err(|_| ApplicationError::bad_request("Invalid idempotency key", None))?; - - if idempotency_key.is_empty() { - return Err(ApplicationError::bad_request( - "Invalid idempotency key, cannot be empty", - None, - )); - } - - let id = Id::try_from(idempotency_key.to_owned()) - .map_err(|_| ApplicationError::bad_request("Invalid idempotency key", None))?; - - let data = IdempotencyKey::new(id); - req.extensions_mut().insert(data); - } else { - let data = IdempotencyKey::new(Id::now(IdPrefix::Idempotency)); - req.extensions_mut().insert(data); - } - Ok(next.run(req).await) -} diff --git a/integrationos-emit/src/middleware/mod.rs b/integrationos-emit/src/middleware/mod.rs deleted file mode 100644 index 6bf46bd9..00000000 --- a/integrationos-emit/src/middleware/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod idempotency; diff --git a/integrationos-emit/src/router/emitter.rs b/integrationos-emit/src/router/emitter.rs deleted file mode 100644 index 123c0344..00000000 --- a/integrationos-emit/src/router/emitter.rs +++ /dev/null @@ -1,31 +0,0 @@ -use crate::{logic::emitter, server::AppState}; -use axum::{middleware::from_fn, response::IntoResponse, routing::get, Json, Router}; -use http::StatusCode; -use integrationos_domain::telemetry::log_request_middleware; -use serde_json::json; -use std::sync::Arc; -use tower_http::{cors::CorsLayer, trace::TraceLayer}; - -pub async fn get_router(state: &Arc<AppState>) -> Router<Arc<AppState>> { - let path = format!("/{}", state.config.api_version); - let metrics_layer = state.metrics.as_ref().0.clone(); - Router::new() - .nest(&path, emitter::get_router()) - .route("/", get(get_root)) - .fallback(not_found_handler) - .layer(CorsLayer::permissive()) - .layer(from_fn(log_request_middleware)) - .layer(metrics_layer) - .layer(TraceLayer::new_for_http()) -} - -pub async fn get_root() -> impl IntoResponse { - Json(json!({ "success": true })) -} - -pub async fn not_found_handler() -> impl IntoResponse { - ( - StatusCode::NOT_FOUND, - Json(json!({ "error": "Not found", })), - ) -} diff --git a/integrationos-emit/src/router/metrics.rs b/integrationos-emit/src/router/metrics.rs deleted file mode 100644 index 6c4448a3..00000000 --- a/integrationos-emit/src/router/metrics.rs +++ /dev/null @@ -1,8 +0,0 @@ -use crate::server::AppState; -use axum::{routing::get, Router}; -use std::{future::ready, sync::Arc}; - -pub async fn
get_router(state: &Arc<AppState>) -> Router<Arc<AppState>> { - let metrics_handle = state.metrics.as_ref().1.clone(); - Router::new().route("/metrics", get(move || ready(metrics_handle.render()))) -} diff --git a/integrationos-emit/src/router/mod.rs b/integrationos-emit/src/router/mod.rs deleted file mode 100644 index d5129faa..00000000 --- a/integrationos-emit/src/router/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod emitter; -pub mod metrics; diff --git a/integrationos-emit/src/server.rs b/integrationos-emit/src/server.rs deleted file mode 100644 index db08ea3e..00000000 --- a/integrationos-emit/src/server.rs +++ /dev/null @@ -1,206 +0,0 @@ -use crate::{ - algebra::metrics::MetricHandle, - domain::{ - config::EmitterConfig, - deduplication::Deduplication, - event::{EventEntity, ScheduledEvent}, - idempotency::Idempotency, - }, - router, - stream::{ - fluvio_driver::FluvioDriverImpl, logger_driver::LoggerDriverImpl, pusher::EventPusher, - scheduler::PublishScheduler, EventStreamExt, EventStreamProvider, EventStreamTopic, - }, -}; -use anyhow::Result as AnyhowResult; -use axum::Router; -use integrationos_domain::{MongoStore, Store, Unit}; -use mongodb::Client; -use reqwest_middleware::{reqwest, ClientBuilder, ClientWithMiddleware}; -use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; -use reqwest_tracing::TracingMiddleware; -use std::{sync::Arc, time::Duration}; -use tokio::{net::TcpListener, signal}; -use tokio_graceful_shutdown::{SubsystemBuilder, SubsystemHandle}; - -#[derive(Clone)] -pub struct AppStores { - pub events: MongoStore<EventEntity>, - pub idempotency: MongoStore<Idempotency>, - pub deduplication: MongoStore<Deduplication>, - pub scheduled: MongoStore<ScheduledEvent>, -} - -#[derive(Clone)] -pub struct AppState { - pub config: EmitterConfig, - pub app_stores: AppStores, - pub http_client: ClientWithMiddleware, - pub metrics: Arc<MetricHandle>, - pub event_stream: Arc<dyn EventStreamExt + Sync + Send>, -} - -#[derive(Clone)] -pub struct Server { - pub state: Arc<AppState>, - pub event_stream: Arc<dyn EventStreamExt + Sync + Send>, - pub scheduler: Arc<PublishScheduler>, - pub pusher: Arc<EventPusher>, -} - -impl Server { - pub async fn init(config: EmitterConfig, metrics: &Arc<MetricHandle>) -> AnyhowResult<Self> { - let client = Client::with_uri_str(&config.db_config.event_db_url).await?; - let database = client.database(&config.db_config.event_db_name); - - let app_stores = AppStores { - events: MongoStore::new(&database, &Store::PipelineEvents).await?, - idempotency: MongoStore::new(&database, &Store::Idempotency).await?, - deduplication: MongoStore::new(&database, &Store::Deduplication).await?, - scheduled: MongoStore::new(&database, &Store::ScheduledEvents).await?, - }; - - let event_stream: Arc<dyn EventStreamExt + Sync + Send> = match config.event_stream_provider - { - EventStreamProvider::Logger => Arc::new(LoggerDriverImpl), - EventStreamProvider::Fluvio => Arc::new(FluvioDriverImpl::new(&config).await?), - }; - - let pusher = Arc::new(EventPusher { - event_stream: Arc::clone(&event_stream), - events: app_stores.events.clone(), - deduplication: app_stores.deduplication.clone(), - max_concurrent_tasks: config.pusher_max_concurrent_tasks, - max_chunk_size: config.pusher_max_chunk_size, - sleep_duration: config.pusher_sleep_duration_millis, - }); - - let scheduler = Arc::new(PublishScheduler { - event_stream: Arc::clone(&event_stream), - scheduled: app_stores.scheduled.clone(), - max_concurrent_tasks: config.scheduled_max_concurrent_tasks, - max_chunk_size: config.scheduled_max_chunk_size, - sleep_duration: config.scheduled_sleep_duration_millis, - }); - - let retry_policy = - ExponentialBackoff::builder().build_with_max_retries(config.http_client_max_retries); - let client =
reqwest::Client::builder() - .timeout(Duration::from_secs(config.http_client_timeout_secs)) - .build()?; - let http_client = ClientBuilder::new(client) - .with(RetryTransientMiddleware::new_with_policy(retry_policy)) - .with(TracingMiddleware::default()) - .build(); - - let state = Arc::new(AppState { - config: config.clone(), - app_stores, - metrics: metrics.clone(), - http_client, - event_stream: Arc::clone(&event_stream), - }); - - Ok(Self { - state, - event_stream, - scheduler, - pusher, - }) - } - - pub async fn run(&self, subsys: SubsystemHandle) -> AnyhowResult<Unit> { - let server_app = router::emitter::get_router(&self.state).await; - let metrics_app = router::metrics::get_router(&self.state).await; - - let server_app: Router = server_app.with_state(self.state.clone()); - let metrics_app: Router = metrics_app.with_state(self.state.clone()); - - tracing::info!("Emitter server listening on {}", self.state.config.address); - tracing::info!( - "Metrics server listening on {}", - self.state.config.metrics_address - ); - - let server_tcp_listener = TcpListener::bind(&self.state.config.address).await?; - let metrics_tcp_listener = TcpListener::bind(&self.state.config.metrics_address).await?; - - let server_handle = axum::serve(server_tcp_listener, server_app) - .with_graceful_shutdown(Self::shutdown(subsys)); - - let metrics_handle = axum::serve(metrics_tcp_listener, metrics_app); - - let (_, _) = tokio::join!(server_handle, metrics_handle); - - Ok(()) - } - - async fn shutdown(subsys: SubsystemHandle) { - let ctrl_c = async { - signal::ctrl_c() - .await - .expect("failed to install Ctrl+C handler"); - }; - - #[cfg(unix)] - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - #[cfg(not(unix))] - let terminate = std::future::pending::<()>(); - - tokio::select!
{ - _ = ctrl_c => { - subsys.on_shutdown_requested().await; - }, - _ = terminate => { - subsys.on_shutdown_requested().await; - }, - } - tracing::info!("Starting server shutdown ..."); - } - - pub async fn subsystem( - server: Server, - config: &EmitterConfig, - subsys: SubsystemHandle, - ) -> Unit { - tracing::info!("Starting Emitter API with config:\n{config}"); - - let state = server.state.clone(); - let stream = server.state.event_stream.clone(); - let scheduler = server.scheduler.clone(); - let pusher = server.pusher.clone(); - - subsys.start(SubsystemBuilder::new( - EventStreamTopic::Dlq.as_ref(), - |h| async move { stream.consume(EventStreamTopic::Dlq, h, &state).await }, - )); - - let state = server.state.clone(); - let stream = server.state.event_stream.clone(); - subsys.start(SubsystemBuilder::new( - EventStreamTopic::Target.as_ref(), - |s| async move { stream.consume(EventStreamTopic::Target, s, &state).await }, - )); - // - let config = server.state.config.clone(); - subsys.start(SubsystemBuilder::new("PusherSubsystem", |s| async move { - pusher.start(&config, s).await - })); - - let config = server.state.config.clone(); - subsys.start(SubsystemBuilder::new( - "SchedulerSubsystem", - |s| async move { scheduler.start(&config, s).await }, - )); - - subsys.start(SubsystemBuilder::new("ServerSubsystem", |s| async move { - server.run(s).await - })); - } -} diff --git a/integrationos-emit/src/stream/fluvio_driver.rs b/integrationos-emit/src/stream/fluvio_driver.rs deleted file mode 100644 index 33716376..00000000 --- a/integrationos-emit/src/stream/fluvio_driver.rs +++ /dev/null @@ -1,585 +0,0 @@ -use super::{EventStreamExt, EventStreamTopic}; -use crate::{ - algebra::metrics::{MetricExt, MetricsRegistry}, - domain::{ - config::{EmitterConfig, EventStreamConfig}, - deduplication::Deduplication, - event::{EventEntity, EventStatus}, - }, - server::AppState, -}; -use anyhow::Context; -use async_trait::async_trait; -use fluvio::{ - consumer::{ - ConsumerConfigExt, ConsumerConfigExtBuilder, ConsumerStream, OffsetManagementStrategy, - Record, - }, - dataplane::link::ErrorCode, - spu::SpuSocketPool, - Compression, Fluvio, FluvioConfig, Offset, RetryPolicy, TopicProducer, - TopicProducerConfigBuilder, -}; -use futures::StreamExt; -use integrationos_domain::{Id, IntegrationOSError, InternalError, TimedExt, Unit}; -use mongodb::bson::doc; -use std::boxed::Box; -use std::{ - sync::atomic::{AtomicBool, AtomicU64, Ordering}, - time::Duration, -}; -use tokio::time::interval; -use tokio_graceful_shutdown::SubsystemHandle; - -pub struct ConsumerConfig { - ext: ConsumerConfigExt, - app: EventStreamConfig, -} - -type TargetProducer = TopicProducer<SpuSocketPool>; -type DlqProducer = TopicProducer<SpuSocketPool>; - -pub struct FluvioDriverImpl { - pub client: Fluvio, - pub tgt_consumer: ConsumerConfig, - pub dlq_consumer: ConsumerConfig, - pub tgt_producer: TargetProducer, - pub dlq_producer: DlqProducer, - pub partition: u32, - pub metrics: MetricsRegistry, -} - -impl FluvioDriverImpl { - pub async fn new(config: &EmitterConfig) -> Result<Self, IntegrationOSError> { - let fluvio_config = FluvioConfig::new(config.fluvio.endpoint()); - let fluvio_client = Fluvio::connect_with_config(&fluvio_config).await?; - let partition = config.partition()?; - - let tgt_producer = match &config.fluvio.producer_topic { - Some(producer_topic) => { - // TODO: Bring the retry policy from the config - let config = TopicProducerConfigBuilder::default() - .batch_size(config.fluvio.producer_batch_size) - .linger(Duration::from_millis(config.fluvio.producer_linger_time)) -
.delivery_semantic( - fluvio::DeliverySemantic::AtLeastOnce(RetryPolicy::default()), - ) - .compression(Compression::Gzip) - .build() - .map_err(|e| anyhow::anyhow!("Could not create producer config: {e}"))?; - - fluvio_client - .topic_producer_with_config(producer_topic, config) - .await? - } - None => { - return Err(InternalError::configuration_error( - "Producer not initialized", - None, - )) - } - }; - - let dlq_producer = { - let topic = config.fluvio.dlq_topic.clone(); - let config = TopicProducerConfigBuilder::default() - .batch_size(config.fluvio.producer_batch_size) - .linger(Duration::from_millis(config.fluvio.producer_linger_time)) - .delivery_semantic(fluvio::DeliverySemantic::AtLeastOnce(RetryPolicy::default())) - .compression(Compression::Gzip) - .build() - .map_err(|e| anyhow::anyhow!("Could not create producer config: {e}"))?; - - fluvio_client - .topic_producer_with_config(&topic, config) - .await? - }; - - let tgt_consumer = match &config.fluvio.consumer_topic { - Some(consumer_topic) => { - let offset = match &config.fluvio.absolute_offset { - Some(absolute_offset) => Offset::absolute(*absolute_offset).map_err(|e| { - InternalError::invalid_argument( - &format!("Could not create offset: {e}"), - None, - ) - })?, - None => Offset::beginning(), - }; - - let consumer_id = config.fluvio.consumer_group.clone().ok_or_else(|| { - InternalError::invalid_argument( - "When specifying a consumer topic, a consumer group must be specified", - None, - ) - })?; - - let ext = ConsumerConfigExtBuilder::default() - .topic(consumer_topic) - .partition(partition) - .offset_start(offset) - .offset_consumer(consumer_id) - .offset_strategy(OffsetManagementStrategy::Manual) - .build() - .map_err(|e| anyhow::anyhow!("Could not create consumer config: {e}"))?; - - ConsumerConfig { - ext, - app: config.fluvio.clone(), - } - } - None => { - return Err(InternalError::configuration_error( - "Consumer not initialized", - None, - )) - } - }; - - let dlq_consumer = { - let topic = config.fluvio.dlq_topic.clone(); - let consumer_id = config.fluvio.consumer_group.clone().ok_or_else(|| { - InternalError::invalid_argument( - "When specifying a consumer topic, a consumer group must be specified", - None, - ) - })?; - - let consumer_id = format!("{consumer_id}-dlq"); - - let ext = ConsumerConfigExtBuilder::default() - .topic(&topic) - .partition(partition) - .offset_start(Offset::beginning()) - .offset_consumer(consumer_id) - .offset_strategy(OffsetManagementStrategy::Manual) - .build() - .map_err(|e| anyhow::anyhow!("Could not create consumer config: {e}"))?; - - ConsumerConfig { - ext, - app: config.fluvio.clone(), - } - }; - - Ok(Self { - client: fluvio_client, - tgt_consumer, - dlq_consumer, - tgt_producer, - dlq_producer, - partition, - metrics: MetricsRegistry::default(), - }) - } - - async fn consume_topic( - &self, - target: EventStreamTopic, - subsys: &SubsystemHandle, - ctx: &AppState, - consumer: &ConsumerConfig, - stream: &mut impl ConsumerStream<Item = Result<Record, ErrorCode>>, - ) -> Result<Unit, IntegrationOSError> { - let mut interval = interval(Duration::from_millis(consumer.app.consumer_linger_time)); - interval.tick().await; - - // We don't really need it but we may use a different approach if something comes out of https://github.com/infinyon/fluvio/issues/4267#issuecomment-2489354987 - let count = AtomicU64::new(0); - let is_processing = AtomicBool::new(true); - - if !consumer.ext.partition.is_empty() { - tracing::info!( - "Consuming events from topic {} partition {}", - target.as_ref(), - consumer - .ext - .partition - .iter() -
.map(u32::to_string) - .collect::<Vec<String>>() - .join("-") - ); - } - - loop { - is_processing.store(false, Ordering::SeqCst); - tokio::select! { - timeout = interval.tick() => { - if count.load(std::sync::atomic::Ordering::SeqCst) > 0 { - tracing::info!("Committing offsets after {:?} for topic {}", timeout.elapsed(), target.as_ref()); - stream.offset_commit().map_err(|err| anyhow::anyhow!(err))?; - stream.offset_flush().await.map_err(|err| anyhow::anyhow!(err))?; - tracing::info!("Periodic offset commit completed for topic {}", target.as_ref()); - count.store(0, std::sync::atomic::Ordering::SeqCst); - } - - if subsys.is_shutdown_requested() && !is_processing.load(Ordering::SeqCst) { - tracing::info!("Consumer for {} cancelled by external request. Breaking the loop", target.as_ref()); - break Ok(()); - } - }, - record = stream.next() => { - count.fetch_add(1, Ordering::Relaxed); - - match record { - Some(Ok(record)) => { - let event: EventEntity = serde_json::from_slice(record.get_value()).context("Could not deserialize event")?; - is_processing.store(true, Ordering::SeqCst); - self.process(ctx, target, &event.with_claimed_by(self.partition)).await?; - is_processing.store(false, Ordering::SeqCst); - }, - Some(Err(err)) => return Err(InternalError::io_err(&format!("Error consuming record: {err}"), None)), - None => { - tracing::info!("Consumer stream closed"); - subsys.request_shutdown(); - } - } - - if count.load(std::sync::atomic::Ordering::SeqCst) >= consumer.app.consumer_batch_size as u64 { - count.store(0, Ordering::SeqCst); - stream.offset_commit().map_err(|err| anyhow::anyhow!(err))?; - stream.offset_flush().await.map_err(|err| anyhow::anyhow!(err))?; - } - - if subsys.is_shutdown_requested() { - tracing::info!("Consumer for {} cancelled by external request. Breaking the loop", target.as_ref()); - unset_claimed_by(ctx, self.partition).await?; - break Ok(()); - } - } - } - } - } -} - -#[async_trait] -impl EventStreamExt for FluvioDriverImpl { - /// Publishes an event to the specified topic. - /// - /// # Parameters - /// - `event`: The event to publish, containing its metadata, payload, and associated data. - /// - `target`: The target topic to which the event should be published, either `Target` or `Dlq`. - /// - /// # Behavior - /// This method performs the following steps: - /// 1. Serializes the event into a binary payload using `serde_json`. - /// 2. Sends the serialized payload to the specified topic (`Target` or `DLQ`) using the appropriate producer. - /// - /// The method ensures proper error handling for serialization and publishing, logging relevant errors and returning an appropriate result. - /// - /// # Returns - /// - `Ok(Id)`: The `entity_id` of the published event, indicating successful publication. - /// - `Err(IntegrationOSError)`: If an error occurs during serialization or while sending the event to the target. - /// - /// # Errors - /// - **Serialization Error**: If the event cannot be serialized into a JSON payload. - /// - **Publishing Error**: If the Fluvio producer fails to send the event to the target topic.
- async fn publish( - &self, - event: EventEntity, - target: EventStreamTopic, - ) -> Result<Id, IntegrationOSError> { - let payload = serde_json::to_vec(&event).map_err(|e| { - InternalError::serialize_error(&format!("Could not serialize event: {e}"), None) - })?; - - match target { - EventStreamTopic::Target => { - self.tgt_producer - .send(event.entity_id.to_string(), payload) - .await - .map_err(|e| { - InternalError::io_err(&format!("Could not send event to fluvio: {e}"), None) - })?; - } - EventStreamTopic::Dlq => { - self.dlq_producer - .send(event.entity_id.to_string(), payload) - .await - .map_err(|e| { - InternalError::io_err(&format!("Could not send event to fluvio: {e}"), None) - })?; - } - }; - - Ok(event.entity_id) - } - - /// Consumes events from the specified topic and processes them. - /// - /// # Parameters - /// - `target`: The target event stream topic to consume from, either the main target or the dead-letter queue (DLQ). - /// - `subsys`: A handle to the subsystem used for inter-process communication or coordination. - /// - `ctx`: A reference to the application state, providing access to shared resources and configurations. - /// - /// # Behavior - /// This method creates a consumer stream for the specified topic using the appropriate consumer configuration. - /// It processes each event from the stream and updates the event outcome in the events collection. The processing - /// logic is delegated to the `consume_topic` method, which handles event-specific tasks. - /// - /// # Returns - /// - `Ok(Unit)`: If the events are consumed and processed successfully. - /// - `Err(IntegrationOSError)`: If an error occurs during stream consumption or processing. - /// - /// # Errors - /// This method returns an error if: - /// - There is an issue initializing the consumer stream. - /// - An error occurs while processing events in the topic. - async fn consume( - &self, - target: EventStreamTopic, - subsys: SubsystemHandle, - ctx: &AppState, - ) -> Result<Unit, IntegrationOSError> { - let consumer = match target { - EventStreamTopic::Target => &self.tgt_consumer, - EventStreamTopic::Dlq => &self.dlq_consumer, - }; - - let mut stream = self - .client - .consumer_with_config(consumer.ext.clone()) - .await?; - - self.consume_topic(target, &subsys, ctx, consumer, &mut stream) - .await - }
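For orientation, a condensed sketch (my own summary, not code from the patch) of the retry policy that the `process`/`process_event` pair below implements: deduplication guard first, then the side effect, then bounded DLQ retries before giving up.

```rust
// Outcome of one processing attempt, with retry bookkeeping as described
// in the doc comments that follow.
#[derive(Debug, PartialEq)]
enum Outcome { Skipped, Succeeded, SentToDlq { retries: u32 }, GaveUp }

fn handle(already_processed: bool, side_effect_ok: bool, retries: u32, max: u32) -> Outcome {
    if already_processed {
        return Outcome::Skipped;                    // deduplication record exists
    }
    if side_effect_ok {
        return Outcome::Succeeded;                  // outcome marked succeeded
    }
    if retries <= max {
        Outcome::SentToDlq { retries: retries + 1 } // republished to the DLQ
    } else {
        Outcome::GaveUp                             // error recorded, no republish
    }
}

fn main() {
    assert_eq!(handle(true, false, 0, 5), Outcome::Skipped);
    assert_eq!(handle(false, false, 5, 5), Outcome::SentToDlq { retries: 6 });
    assert_eq!(handle(false, false, 6, 5), Outcome::GaveUp);
}
```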
- - /// Processes an individual event from the consumer stream. - /// - /// # Parameters - /// - `ctx`: A reference to the application state, which provides access to shared resources, configurations, and storage. - /// - `target`: The event stream topic that the event belongs to, either `Target` or `Dlq`. - /// - `event`: The event to be processed, containing its metadata, status, and logic for side effects. - /// - /// # Behavior - /// This method performs the following steps: - /// 1. **Deduplication Check**: Verifies if the event has already been processed by checking the deduplication store. If so, the method returns early. - /// 2. **Deduplication Record Creation**: If the event is not processed, it creates a deduplication record to prevent re-processing. - /// 3. **Event Processing**: - /// - Executes the event's side effect logic. - /// - Updates the event's outcome in the events store based on the success or failure of the side effect. - /// 4. **Error Handling**: - /// - If processing fails, the deduplication record is removed, and the event is published to the DLQ with updated retry metadata. - /// - If the event is in the DLQ and has exceeded the maximum allowed retries, it marks the event as permanently failed. - /// - /// The method distinguishes between events in the main `Target` topic and the `DLQ` (Dead Letter Queue), handling them differently based on their context and retry state. - /// - /// # Returns - /// - `Ok(Unit)`: If the event is successfully processed or deemed complete (even if moved to the DLQ). - /// - `Err(IntegrationOSError)`: If a critical error occurs during processing or storage operations. - /// - /// # Errors - /// - Returns an error if there are issues interacting with the deduplication store or the events store. - /// - Errors may also occur if publishing to the DLQ or executing side effects fails critically. - async fn process( - &self, - ctx: &AppState, - target: EventStreamTopic, - event: &EventEntity, - ) -> Result<Unit, IntegrationOSError> { - match process_event(self, ctx, target, event).await { - Ok(_) => { - self.metrics.succeeded(1); - Ok(()) - } - Err(e) => { - self.metrics.errored(1); - Err(e) - } - } - } -} - -async fn unset_claimed_by(ctx: &AppState, claimer: u32) -> Result<Unit, IntegrationOSError> { - ctx.app_stores - .events - .update_many( - doc! { "claimedBy": claimer }, - doc! { "$unset": { "claimedBy": "" } }, - ) - .await?; - - Ok(()) -} - -async fn delete_deduplication_record( - ctx: &AppState, - event: &EventEntity, -) -> Result<Unit, IntegrationOSError> { - ctx.app_stores - .deduplication - .collection - .delete_one(doc! { - "_id": event.entity_id.to_string() - }) - .await?; - - Ok(()) -} - -async fn update_event_status( - ctx: &AppState, - event: &EventEntity, - outcome: EventStatus, -) -> Result<Unit, IntegrationOSError> { - let outcome = mongodb::bson::to_bson(&outcome).context("Could not serialize event")?; - - ctx.app_stores - .events - .update_one( - &event.entity_id.to_string(), - doc! { "$set": { "outcome": outcome } }, - ) - .await?; - - Ok(()) -} - -async fn process_event( - fluvio_driver: &FluvioDriverImpl, - ctx: &AppState, - target: EventStreamTopic, - event: &EventEntity, -) -> Result<Unit, IntegrationOSError> { - let is_processed = ctx - .app_stores - .deduplication - .get_one_by_id(&event.entity_id.to_string()) - .await - .map_err(|e| { - tracing::error!("Could not fetch deduplication record: {e}"); - InternalError::unknown("Could not fetch deduplication record", None) - })?
- .is_some(); - - if is_processed { - tracing::info!("Event with id {} is already processed", event.entity_id); - return Ok(()); - } - - let insert_result = ctx - .app_stores - .deduplication - .create_one(&Deduplication { - entity_id: event.entity_id, - metadata: event.metadata.clone(), - }) - .await; - - if let Err(e) = insert_result { - tracing::error!("Could not create deduplication record: {e}"); - if e.is_unique_error() { - return Ok(()); - } else { - return Err(e); - } - } - - tracing::info!( - "Event with id {} is claimed by {}", - event.entity_id, - fluvio_driver.partition - ); - - match target { - EventStreamTopic::Target => { - ctx.app_stores.events.create_one(event).await.map_err(|e| { - tracing::error!("Could not create event record: {e}"); - InternalError::unknown("Could not create event record", None) - })?; - - tracing::info!("Event with id {} is ready to be processed", event.entity_id); - let result = event - .side_effect(ctx) - .timed(|_, elapsed| { - fluvio_driver.metrics.duration(elapsed); - - tracing::info!( - "Side effect for entity id {} took {}ms", - event.entity_id, - elapsed.as_millis() - ) - }) - .await; - - update_event_status(ctx, event, EventStatus::executed()).await?; - - if let Err(e) = result { - fluvio_driver.metrics.errored(1); - tracing::error!("Error processing event: {e}, removing deduplication record"); - delete_deduplication_record(ctx, event).await?; - - let status = EventStatus::errored(e.to_string(), 1); - let event = event.with_status(status.clone()); - - tracing::debug!( - "Event with id {} is in DLQ, with number of retries {}", - event.entity_id, - event.retries() - ); - - fluvio_driver - .publish(event.clone(), EventStreamTopic::Dlq) - .await?; - - tracing::debug!("Event with id {} is published to DLQ", event.entity_id); - - update_event_status(ctx, &event, status).await?; - - tracing::debug!("Event with id {} is updated to DLQ", event.entity_id); - - return Ok(()); - } - - update_event_status(ctx, event, EventStatus::succeded(event.retries())).await?; - } - EventStreamTopic::Dlq => { - tracing::info!("Event with id {} is in DLQ", event.entity_id); - if event.retries() <= ctx.config.event_processing_max_retries { - let result = event.side_effect(ctx).await; - - if let Err(e) = result { - tracing::error!("Error processing event: {e}, removing deduplication record"); - delete_deduplication_record(ctx, event).await?; - - let outcome = EventStatus::errored(e.to_string(), event.retries() + 1); - - tracing::debug!( - "Event with id {} is in DLQ, with number of retries {}", - event.entity_id, - event.retries() - ); - - let event = event.with_status(outcome.clone()); - - fluvio_driver - .publish(event.clone(), EventStreamTopic::Dlq) - .await?; - - tracing::debug!("Event with id {} is published to DLQ", event.entity_id); - - update_event_status(ctx, &event, outcome).await?; - - tracing::debug!("Event with id {} is updated to DLQ", event.entity_id); - - return Ok(()); - } - - update_event_status(ctx, event, EventStatus::succeded(event.retries())).await?; - } else { - tracing::info!("Giving up on event with id {}", event.entity_id); - // this is the case where we exhausted the retries, now - // the error is updated and not sent to the target topic - let error = event.error().unwrap_or_default() - + ".\n Exhausted retries, cannot process event"; - - update_event_status(ctx, event, EventStatus::errored(error, event.retries())) - .await?; - } - } - } - - Ok(()) -} diff --git a/integrationos-emit/src/stream/logger_driver.rs 
diff --git a/integrationos-emit/src/stream/logger_driver.rs b/integrationos-emit/src/stream/logger_driver.rs
deleted file mode 100644
index 4a1406bb..00000000
--- a/integrationos-emit/src/stream/logger_driver.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-use super::{EventStreamExt, EventStreamTopic};
-use crate::{domain::event::EventEntity, server::AppState};
-use async_trait::async_trait;
-use integrationos_domain::{prefix::IdPrefix, Id, IntegrationOSError, Unit};
-use std::boxed::Box;
-use tokio_graceful_shutdown::SubsystemHandle;
-
-pub struct LoggerDriverImpl;
-
-#[async_trait]
-impl EventStreamExt for LoggerDriverImpl {
-    async fn publish(
-        &self,
-        event: EventEntity,
-        _target: EventStreamTopic,
-    ) -> Result<Id, IntegrationOSError> {
-        tracing::info!("Received event: {:?}, using logger handler", event);
-
-        Ok(Id::now(IdPrefix::PipelineEvent))
-    }
-
-    async fn consume(
-        &self,
-        target: EventStreamTopic,
-        _subsys: SubsystemHandle,
-        _ctx: &AppState,
-    ) -> Result<Unit, IntegrationOSError> {
-        tracing::info!(
-            "Consuming records from {} using logger handler",
-            target.as_ref()
-        );
-
-        Ok(())
-    }
-
-    async fn process(
-        &self,
-        _ctx: &AppState,
-        target: EventStreamTopic,
-        _event: &EventEntity,
-    ) -> Result<Unit, IntegrationOSError> {
-        tracing::info!(
-            "Processing records from {} using logger handler",
-            target.as_ref()
-        );
-
-        Ok(())
-    }
-}
diff --git a/integrationos-emit/src/stream/mod.rs b/integrationos-emit/src/stream/mod.rs
deleted file mode 100644
index 05d7ca91..00000000
--- a/integrationos-emit/src/stream/mod.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-pub mod fluvio_driver;
-pub mod logger_driver;
-pub mod pusher;
-pub mod scheduler;
-
-use crate::{domain::event::EventEntity, server::AppState};
-use async_trait::async_trait;
-use integrationos_domain::{Id, IntegrationOSError, Unit};
-use strum::{AsRefStr, Display, EnumIter, EnumString};
-use tokio_graceful_shutdown::SubsystemHandle;
-
-pub const SINGLETON_ID: u32 = 0;
-
-#[async_trait]
-pub trait EventStreamExt<T = EventEntity> {
-    async fn publish(
-        &self,
-        event: EventEntity,
-        target: EventStreamTopic,
-    ) -> Result<Id, IntegrationOSError>;
-    async fn consume(
-        &self,
-        target: EventStreamTopic,
-        subsys: SubsystemHandle,
-        ctx: &AppState,
-    ) -> Result<Unit, IntegrationOSError>;
-    async fn process(
-        &self,
-        ctx: &AppState,
-        target: EventStreamTopic,
-        events: &T,
-    ) -> Result<Unit, IntegrationOSError>;
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr, Display)]
-#[strum(serialize_all = "kebab-case")]
-pub enum EventStreamProvider {
-    Fluvio,
-    Logger,
-}
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumString, AsRefStr, EnumIter)]
-#[strum(serialize_all = "kebab-case")]
-pub enum EventStreamTopic {
-    Target,
-    Dlq,
-}
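The `EventStreamProvider` enum above is what let the server swap the Fluvio driver for the logging driver at configuration time, with callers holding an `Arc<dyn EventStreamExt>`. A condensed, synchronous sketch of that dispatch (the real trait is async and returns `IntegrationOSError`; the names below are simplified stand-ins):

    use std::sync::Arc;

    #[derive(Debug)]
    struct Event {
        entity_id: String,
    }

    trait EventStream {
        fn publish(&self, event: Event);
    }

    struct LoggerDriver;

    impl EventStream for LoggerDriver {
        fn publish(&self, event: Event) {
            // mirrors the logger driver above: log the event and drop it
            println!("Received event: {event:?}, using logger handler");
        }
    }

    enum Provider {
        Logger,
        // the Fluvio variant lived here before this patch
    }

    fn driver_for(provider: Provider) -> Arc<dyn EventStream + Send + Sync> {
        match provider {
            Provider::Logger => Arc::new(LoggerDriver),
        }
    }

    fn main() {
        let stream = driver_for(Provider::Logger);
        stream.publish(Event { entity_id: "evt_1".into() });
    }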
diff --git a/integrationos-emit/src/stream/pusher.rs b/integrationos-emit/src/stream/pusher.rs
deleted file mode 100644
index 33be61ba..00000000
--- a/integrationos-emit/src/stream/pusher.rs
+++ /dev/null
@@ -1,213 +0,0 @@
-use super::{EventStreamExt, SINGLETON_ID};
-use crate::{
-    domain::{config::EmitterConfig, deduplication::Deduplication, event::EventEntity},
-    stream::EventStreamTopic,
-};
-use chrono::{Duration as CDuration, Utc};
-use futures::{StreamExt, TryStreamExt};
-use integrationos_domain::{IntegrationOSError, InternalError, MongoStore, Unit};
-use mongodb::bson::doc;
-use std::{sync::Arc, time::Duration};
-use tokio_graceful_shutdown::{FutureExt, SubsystemHandle};
-
-#[derive(Clone)]
-pub struct EventPusher {
-    pub event_stream: Arc<dyn EventStreamExt + Sync + Send>,
-    pub events: MongoStore<EventEntity>,
-    pub deduplication: MongoStore<Deduplication>,
-    pub max_concurrent_tasks: usize,
-    pub max_chunk_size: usize,
-    pub sleep_duration: u64,
-}
-
-impl EventPusher {
-    pub async fn start(
-        &self,
-        config: &EmitterConfig,
-        subsys: SubsystemHandle,
-    ) -> Result<Unit, IntegrationOSError> {
-        if config.partition()? != SINGLETON_ID {
-            tracing::info!(
-                "Limiting event publisher to single partition {}",
-                SINGLETON_ID
-            );
-            return Ok(());
-        }
-
-        match self.process(config).cancel_on_shutdown(&subsys).await {
-            Ok(result) => {
-                tracing::info!("Scheduled event publisher finished");
-                subsys.on_shutdown_requested().await;
-
-                result
-            }
-            Err(_) => {
-                tracing::warn!("EventPusher was cancelled due to shutdown");
-                subsys.on_shutdown_requested().await;
-                Ok(())
-            }
-        }
-    }
-
-    async fn process(&self, config: &EmitterConfig) -> Result<Unit, IntegrationOSError> {
-        let events_store = self.events.clone();
-        let deduplication_store = self.deduplication.clone();
-        let event_stream = Arc::clone(&self.event_stream);
-        let claimer = config.partition()?;
-
-        let max_concurrent_tasks = self.max_concurrent_tasks;
-        let max_chunk_size = self.max_chunk_size;
-        let sleep_duration = self.sleep_duration;
-
-        tracing::info!("Starting event pusher");
-        loop {
-            let now = Utc::now();
-            let before = now - CDuration::seconds(config.event_max_span_for_retry_secs);
-
-            tracing::debug!("Polling for events at {}", now);
-
-            let query = doc! {
-                "$or": [
-                    {"$and": [
-                        { "outcome.type": "errored" },
-                        { "outcome.retries": { "$lt": config.event_processing_max_retries } },
-                        { "createdAt": { "$lt": before.timestamp_millis() } },
-                        { "claimedBy": { "$exists": false } }
-                    ]},
-                    {"$and": [
-                        { "outcome.type": "executed" },
-                        { "createdAt": { "$lt": before.timestamp_millis() } },
-                        { "claimedBy": { "$exists": false } }
-                    ]},
-                    {"$and": [
-                        { "outcome.type": "created" },
-                        { "createdAt": { "$lt": before.timestamp_millis() } },
-                        { "claimedBy": { "$exists": false } }
-                    ]}
-                ]
-            };
-
-            let events = events_store.collection.find(query).await;
-
-            if let Ok(events) = events {
-                let event_stream = Arc::clone(&event_stream);
-                let deduplication_store = deduplication_store.clone();
-                let events_store = events_store.clone();
-
-                let result = events
-                    .try_chunks(max_chunk_size)
-                    .map(|result| {
-                        let event_stream = Arc::clone(&event_stream);
-                        let deduplication_store = deduplication_store.clone();
-                        let events_store = events_store.clone();
-
-                        let result =
-                            result.map_err(|e| InternalError::io_err(&e.to_string(), None));
-                        async move {
-                            process_chunk(
-                                result,
-                                &event_stream,
-                                &deduplication_store,
-                                &events_store,
-                                claimer,
-                            )
-                            .await
-                        }
-                    })
-                    .buffer_unordered(max_concurrent_tasks)
-                    .collect::<Vec<_>>()
-                    .await
-                    .into_iter()
-                    .collect::<Result<Vec<Unit>, IntegrationOSError>>();
-
-                if let Err(e) = result {
-                    tracing::error!("Failed to publish one or more event chunks: {e}");
-                }
-            } else if let Err(e) = events {
-                tracing::error!("Failed to fetch events: {e}");
-            }
-
-            tokio::time::sleep(Duration::from_millis(sleep_duration)).await;
-        }
-    }
-}
-
-async fn process_chunk(
-    result: Result<Vec<EventEntity>, IntegrationOSError>,
-    event_stream: &Arc<dyn EventStreamExt + Sync + Send>,
-    deduplication_store: &MongoStore<Deduplication>,
-    events_store: &MongoStore<EventEntity>,
-    claimer: u32,
-) -> Result<Unit, IntegrationOSError> {
-    match result {
-        Ok(chunk) => {
-            tracing::info!("Publishing {} event(s)", chunk.len());
-            for event in chunk {
-                // Double check mechanism to prevent duplicated events
-                if events_store
-                    .get_one_by_id(&event.entity_id.to_string())
-                    .await?
-                    .map(|e| e.claimed_by.is_some())
-                    .unwrap_or(false)
-                {
-                    tracing::warn!("Event with id {} is already published", event.entity_id);
-                    continue;
-                }
-
-                events_store
-                    .update_one(
-                        &event.entity_id.to_string(),
-                        doc! { "$set": { "claimedBy": claimer } },
-                    )
-                    .await?;
-
-                let entity_id = event.entity_id;
-
-                let deleted = deduplication_store
-                    .collection
-                    .delete_one(doc! { "_id": entity_id.to_string() })
-                    .await?;
-
-                tracing::info!(
-                    "Deleted event with id {:?} from deduplication store",
-                    deleted
-                );
-
-                event_stream
-                    .publish(event, EventStreamTopic::Dlq)
-                    .await
-                    .inspect(|_| {
-                        tracing::info!("Event with id {} is published", entity_id);
-                    })
-                    .inspect_err(|e| {
-                        tracing::error!("Failed to publish event: {e}");
-                    })?;
-            }
-            Ok(())
-        }
-        Err(e) => {
-            tracing::error!("Failed to chunk events: {e}");
-            Err(e)
-        }
-    }
-}
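The pusher's `$or` query above encodes one eligibility rule three ways: an event is re-published only if it is unclaimed, older than the retry window, and either `errored` under the retry cap or still stuck in `executed`/`created`. The same predicate in plain Rust, with `Outcome` as a simplified stand-in for the stored `outcome` document:

    // Plain-Rust restatement of the pusher's MongoDB `$or` query.
    enum Outcome {
        Created,
        Executed,
        Errored { retries: u32 },
        Succeeded,
    }

    fn needs_republish(
        outcome: &Outcome,
        created_at_ms: i64,
        cutoff_ms: i64,
        claimed: bool,
        max_retries: u32,
    ) -> bool {
        if claimed || created_at_ms >= cutoff_ms {
            return false; // claimed, or still inside the retry window
        }
        match outcome {
            Outcome::Errored { retries } => *retries < max_retries,
            Outcome::Executed | Outcome::Created => true,
            Outcome::Succeeded => false,
        }
    }

    fn main() {
        // An old, unclaimed, errored event under the retry cap is picked up again.
        assert!(needs_republish(&Outcome::Errored { retries: 1 }, 0, 1_000, false, 5));
        // Events stuck mid-flight are also retried.
        assert!(needs_republish(&Outcome::Executed, 0, 1_000, false, 5));
        // A claimed event is someone else's problem.
        assert!(!needs_republish(&Outcome::Created, 0, 1_000, true, 5));
        // A succeeded event is never re-published.
        assert!(!needs_republish(&Outcome::Succeeded, 0, 1_000, false, 5));
    }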
{ "$set": { "claimedBy": claimer } }, - ) - .await?; - - let entity_id = event.entity_id; - - let deleted = deduplication_store - .collection - .delete_one(doc! { "_id": entity_id.to_string() }) - .await?; - - tracing::info!( - "Deleted event with id {:?} from deduplication store", - deleted - ); - - event_stream - .publish(event, EventStreamTopic::Dlq) - .await - .inspect(|_| { - tracing::info!("Event with id {} is published", entity_id); - }) - .inspect_err(|e| { - tracing::error!("Failed to publish event: {e}"); - })?; - } - Ok(()) - } - Err(e) => { - tracing::error!("Failed to chunk events: {e}"); - Err(e) - } - } -} diff --git a/integrationos-emit/src/stream/scheduler.rs b/integrationos-emit/src/stream/scheduler.rs deleted file mode 100644 index 6a4bd90a..00000000 --- a/integrationos-emit/src/stream/scheduler.rs +++ /dev/null @@ -1,135 +0,0 @@ -use super::{EventStreamExt, SINGLETON_ID}; -use crate::{ - domain::{config::EmitterConfig, event::ScheduledEvent}, - stream::EventStreamTopic, -}; -use chrono::Utc; -use futures::{StreamExt, TryStreamExt}; -use integrationos_domain::{IntegrationOSError, InternalError, MongoStore, Unit}; -use mongodb::bson::doc; -use std::{sync::Arc, time::Duration}; -use tokio_graceful_shutdown::{FutureExt, SubsystemHandle}; - -// Simple scheduler. Heavily relies on the database for scheduling events -#[derive(Clone)] -pub struct PublishScheduler { - pub event_stream: Arc, - pub scheduled: MongoStore, - pub max_concurrent_tasks: usize, - pub max_chunk_size: usize, - pub sleep_duration: u64, -} - -impl PublishScheduler { - pub async fn start( - &self, - config: &EmitterConfig, - subsys: SubsystemHandle, - ) -> Result { - if config.partition()? != SINGLETON_ID { - tracing::info!( - "Limiting event scheduler to single partition {}", - SINGLETON_ID - ); - return Ok(()); - } - - match self.process().cancel_on_shutdown(&subsys).await { - Ok(result) => { - tracing::info!("Scheduled event publisher finished"); - subsys.on_shutdown_requested().await; - - result - } - Err(_) => { - tracing::warn!("PublishScheduler was cancelled due to shutdown"); - subsys.on_shutdown_requested().await; - Ok(()) - } - } - } - - async fn process(&self) -> Result { - let scheduled = self.scheduled.clone(); - let event_stream = Arc::clone(&self.event_stream); - - let max_concurrent_tasks = self.max_concurrent_tasks; - let max_chunk_size = self.max_chunk_size; - let sleep_duration = self.sleep_duration; - - tracing::info!("Starting scheduled event publisher"); - loop { - tracing::debug!( - "Polling for scheduled events at {}", - Utc::now().timestamp_millis() - ); - - let events = scheduled - .collection - .find(doc! 
diff --git a/integrationos-emit/tests/context.rs b/integrationos-emit/tests/context.rs
deleted file mode 100644
index 3296ae2b..00000000
--- a/integrationos-emit/tests/context.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-use envconfig::Envconfig;
-use http::{Method, StatusCode};
-use integrationos_domain::{IntegrationOSError, InternalError, Unit};
-use integrationos_emit::algebra::metrics::{MetricHandle, MetricsRegistry};
-use integrationos_emit::domain::config::EmitterConfig;
-use integrationos_emit::server::Server;
-use mockito::{Server as MockServer, ServerGuard};
-use serde::{de::DeserializeOwned, Serialize};
-use serde_json::Value;
-use std::error::Error;
-use std::fmt::Debug;
-use std::sync::Arc;
-use std::{collections::HashMap, sync::OnceLock, time::Duration};
-use testcontainers_modules::{
-    mongo::Mongo,
-    testcontainers::{clients::Cli as Docker, Container},
-};
-use tokio::net::TcpListener;
-use tokio_graceful_shutdown::Toplevel;
-use tracing::level_filters::LevelFilter;
-use tracing_subscriber::EnvFilter;
-use uuid::Uuid;
-
-static DOCKER: OnceLock<Docker> = OnceLock::new();
-static MONGO: OnceLock<Container<'static, Mongo>> = OnceLock::new();
-static TRACING: OnceLock<Unit> = OnceLock::new();
-static METRICS: OnceLock<Arc<MetricHandle>> = OnceLock::new();
-
-pub struct TestServer {
-    pub port: u16,
-    pub client: reqwest::Client,
-    pub mock_server: ServerGuard,
-}
-
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub struct ApiResponse<T> {
-    pub code: StatusCode,
-    pub data: T,
-}
-
-impl TestServer {
-    pub async fn new() -> Result<Self, IntegrationOSError> {
-        TRACING.get_or_init(|| {
-            let filter = EnvFilter::builder()
-                .with_default_directive(LevelFilter::INFO.into())
-                .from_env_lossy();
-
-            tracing_subscriber::fmt().with_env_filter(filter).init();
-        });
-        let metrics = METRICS.get_or_init(|| Arc::new(MetricsRegistry::handle()));
-        let docker = DOCKER.get_or_init(Default::default);
-        let mongo = MONGO.get_or_init(|| docker.run(Mongo));
-        let port = mongo.get_host_port_ipv4(27017);
-
format!("mongodb://127.0.0.1:{port}/?directConnection=true"); - let database_name = Uuid::new_v4().to_string(); - - let server_port = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind to port") - .local_addr() - .expect("Failed to get local address") - .port(); - - let metrics_port = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind to port") - .local_addr() - .expect("Failed to get local address") - .port(); - - let mock_server = MockServer::new_async().await; - let mock_uri = mock_server.url(); - let config = vec![ - ( - "INTERNAL_SERVER_ADDRESS".to_string(), - format!("0.0.0.0:{server_port}"), - ), - ( - "METRICS_SERVER_ADDRESS".to_string(), - format!("0.0.0.0:{metrics_port}"), - ), - ("CONTROL_DATABASE_URL".to_string(), database_uri.clone()), - ("CONTROL_DATABASE_NAME".to_string(), database_name.clone()), - ("CONTEXT_DATABASE_URL".to_string(), database_uri.clone()), - ("CONTEXT_DATABASE_NAME".to_string(), database_name.clone()), - ("EVENT_DATABASE_URL".to_string(), database_uri.clone()), - ("EVENT_DATABASE_NAME".to_string(), database_name.clone()), - ( - "STATEFULSET_POD_NAME".to_string(), - "event-emit-0".to_string(), - ), - ("PARTITION_COUNT".to_string(), "1".to_string()), - ("ENVIRONMENT".to_string(), "test".to_string()), - ("EVENT_STREAM_PROVIDER".to_string(), "fluvio".to_string()), - ("EVENT_STREAM_PORT".to_string(), "9103".to_string()), - ( - "EVENT_STREAM_PRODUCER_TOPIC".to_string(), - "events".to_string(), - ), - ( - "EVENT_STREAM_CONSUMER_TOPIC".to_string(), - "events".to_string(), - ), - ( - "EVENT_STREAM_CONSUMER_GROUP".to_string(), - "event-all-partitions-consumer".to_string(), - ), - ( - "EVENT_CALLBACK_URL".to_string(), - format!("{mock_uri}/v1/event-callbacks"), - ), - ]; - - let config = EmitterConfig::init_from_hashmap(&HashMap::from_iter(config)) - .expect("Failed to initialize storage config"); - - let server = Server::init(config.clone(), metrics) - .await - .expect("Failed to initialize storage"); - - tokio::task::spawn(async move { - Toplevel::new(|s| async move { - Server::subsystem(server, &config, s).await; - }) - .catch_signals() - .handle_shutdown_requests(Duration::from_secs(5)) - .await - }); - - tokio::time::sleep(Duration::from_secs(1)).await; - - let client = reqwest::Client::new(); - - Ok(Self { - port: server_port, - client, - mock_server, - }) - } - - pub async fn send_request( - &self, - path: &str, - method: Method, - payload: Option<&T>, - header: Option<&HashMap>, - ) -> Result, IntegrationOSError> { - let uri = format!("http://localhost:{}/{path}", self.port); - let mut req = self.client.request(method, uri); - if let Some(payload) = payload { - req = req.json(payload); - } - - if let Some(header) = header { - for (key, value) in header { - req = req.header(key, value); - } - } - - let res = req.send().await.map_err(|e| { - InternalError::io_err(&format!("Failed to send request: {:?}", e.source()), None) - })?; - - let status = res.status(); - let json = res.json().await; - - Ok(ApiResponse { - code: status, - data: json.map_err(|e| { - InternalError::deserialize_error( - &format!("Failed to deserialize response: {}", e), - None, - ) - })?, - }) - } -} diff --git a/integrationos-emit/tests/http/emitter.rs b/integrationos-emit/tests/http/emitter.rs deleted file mode 100644 index 2cb15143..00000000 --- a/integrationos-emit/tests/http/emitter.rs +++ /dev/null @@ -1,140 +0,0 @@ -use crate::context::TestServer; -use futures::{stream, StreamExt}; -use http::{ - header::{ACCEPT, AUTHORIZATION, HOST}, - Method, 
diff --git a/integrationos-emit/tests/http/emitter.rs b/integrationos-emit/tests/http/emitter.rs
deleted file mode 100644
index 2cb15143..00000000
--- a/integrationos-emit/tests/http/emitter.rs
+++ /dev/null
@@ -1,140 +0,0 @@
-use crate::context::TestServer;
-use futures::{stream, StreamExt};
-use http::{
-    header::{ACCEPT, AUTHORIZATION, HOST},
-    Method,
-    StatusCode,
-};
-use integrationos_domain::{prefix::IdPrefix, Id, IntegrationOSError, Unit};
-use integrationos_emit::logic::emitter::EntityIdResponse;
-use mockito::Matcher;
-use serde_json::{json, Value};
-use std::{collections::HashMap, time::Duration};
-
-const PARALLEL_REQUESTS: usize = 10;
-
-#[tokio::test]
-async fn test_concurrent_requests() -> Result<Unit, IntegrationOSError> {
-    let server = TestServer::new().await?;
-    let payload = json!({
-        "type": "DatabaseConnectionLost",
-        "connectionId": "conn::GAL2svWJp9k::MtmXaau5Qf6R5n3Y-L9ejQ"
-    });
-
-    let response = server
-        .send_request::<Value, EntityIdResponse>("v1/emit", Method::POST, Some(&payload), None)
-        .await;
-
-    assert!(response.is_ok());
-
-    let headers = HashMap::from_iter(vec![(
-        "x-integrationos-idempotency-key".to_string(),
-        response
-            .expect("Failed to get response")
-            .data
-            .entity_id
-            .to_string(),
-    )]);
-
-    let reqs = vec!["v1/emit"; PARALLEL_REQUESTS];
-
-    let results = stream::iter(reqs)
-        .map(|path| {
-            server.send_request::<Value, Value>(path, Method::POST, Some(&payload), Some(&headers))
-        })
-        .buffer_unordered(PARALLEL_REQUESTS)
-        .collect::<Vec<_>>()
-        .await;
-
-    assert_eq!(results.len(), PARALLEL_REQUESTS);
-    let status_codes = results
-        .into_iter()
-        .map(|r| r.expect("Failed to send request").code)
-        .collect::<Vec<_>>();
-
-    assert!(status_codes.iter().any(|c| c == &StatusCode::CONFLICT));
-    assert!(status_codes.iter().any(|c| c == &StatusCode::OK));
-
-    assert_eq!(
-        status_codes
-            .iter()
-            .filter(|c| c == &&StatusCode::CONFLICT)
-            .count(),
-        PARALLEL_REQUESTS - 1
-    );
-    assert_eq!(
-        status_codes
-            .iter()
-            .filter(|c| c == &&StatusCode::OK)
-            .count(),
-        1
-    );
-
-    tokio::time::sleep(Duration::from_secs(10)).await;
-
-    Ok(())
-}
-
-#[tokio::test]
-async fn test_event_processed() -> Result<Unit, IntegrationOSError> {
-    let mut server = TestServer::new().await?;
-
-    let id = Id::now(IdPrefix::Connection).to_string();
-    let payload = json!({
-        "type": "DatabaseConnectionLost",
-        "connectionId": id.clone()
-    });
-    let path = format!("/v1/event-callbacks/database-connection-lost/{}", id);
-    let mock_server = server
-        .mock_server
-        .mock("POST", path.as_str())
-        .match_header(AUTHORIZATION, Matcher::Any)
-        .match_header(ACCEPT, "*/*")
-        .match_header(HOST, server.mock_server.host_with_port().as_str())
-        .with_status(200)
-        .with_body("{}")
-        .with_header("content-type", "application/json")
-        .create_async()
-        .await;
-
-    let res = server
-        .send_request::<Value, Value>("v1/emit", Method::POST, Some(&payload), None)
-        .await
-        .expect("Failed to send request");
-
-    assert_eq!(res.code, StatusCode::OK);
-
-    // Giving it some time for the commit to happen
-    tokio::time::sleep(Duration::from_secs(10)).await;
-
-    mock_server.expect_at_most(1).assert_async().await;
-
-    let id = Id::now(IdPrefix::Connection).to_string();
-    let payload = json!({
-        "type": "DatabaseConnectionLost",
-        "connectionId": id.clone()
-    });
-    let path = format!("/v1/event-callbacks/database-connection-lost/{}", id);
-    let mock_server = server
-        .mock_server
-        .mock("POST", path.as_str())
-        .match_header(AUTHORIZATION, Matcher::Any)
-        .match_header(ACCEPT, "*/*")
-        .match_header(HOST, server.mock_server.host_with_port().as_str())
-        .with_status(500)
-        .with_body("{}")
-        .with_header("content-type", "application/json")
-        .create_async()
-        .await;
-
-    let res = server
-        .send_request::<Value, Value>("v1/emit", Method::POST, Some(&payload), None)
-        .await
-        .expect("Failed to send request");
-
-    assert_eq!(res.code, StatusCode::OK);
-
-    tokio::time::sleep(Duration::from_secs(3)).await;
-
-    mock_server.expect_at_least(3).assert_async().await;
-    Ok(())
-}
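`test_concurrent_requests` pins down the idempotency contract: ten concurrent `v1/emit` calls sharing one `x-integrationos-idempotency-key` must yield exactly one `200 OK` and nine `409 CONFLICT`s. The same invariant in a std-only sketch, where `IdempotencyFilter` is a hypothetical stand-in for the emitter's idempotency store:

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};
    use std::thread;

    struct IdempotencyFilter {
        keys: Mutex<HashSet<String>>,
    }

    impl IdempotencyFilter {
        fn handle(&self, key: &str) -> u16 {
            // HashSet::insert returns true only for the first caller,
            // playing the role of the store's unique-key constraint.
            if self.keys.lock().unwrap().insert(key.to_string()) {
                200 // the first request with this key wins
            } else {
                409 // every concurrent duplicate is rejected
            }
        }
    }

    fn main() {
        let filter = Arc::new(IdempotencyFilter { keys: Mutex::new(HashSet::new()) });
        let handles: Vec<_> = (0..10)
            .map(|_| {
                let filter = Arc::clone(&filter);
                thread::spawn(move || filter.handle("idem-key-1"))
            })
            .collect();
        let codes: Vec<u16> = handles.into_iter().map(|h| h.join().unwrap()).collect();
        assert_eq!(codes.iter().filter(|&&c| c == 200).count(), 1);
        assert_eq!(codes.iter().filter(|&&c| c == 409).count(), 9);
    }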
diff --git a/integrationos-emit/tests/http/mod.rs b/integrationos-emit/tests/http/mod.rs
deleted file mode 100644
index b799a185..00000000
--- a/integrationos-emit/tests/http/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub mod emitter;
diff --git a/integrationos-emit/tests/main.rs b/integrationos-emit/tests/main.rs
deleted file mode 100644
index 188c0a9b..00000000
--- a/integrationos-emit/tests/main.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-pub mod context;
-pub mod http;
diff --git a/integrationos-emit/tests/resource/Dockerfile b/integrationos-emit/tests/resource/Dockerfile
deleted file mode 100644
index c66da9dc..00000000
--- a/integrationos-emit/tests/resource/Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM ubuntu:20.04
-
-RUN apt-get update
-RUN apt-get install -y curl unzip
-RUN curl -fsS https://hub.infinyon.cloud/install/install.sh?ctx=dc | bash
-
-ENV PATH "$PATH:/root/.fluvio/bin"
-ENV PATH "$PATH:/root/.fvm/bin"
diff --git a/integrationos-emit/tests/resource/docker-compose.yml b/integrationos-emit/tests/resource/docker-compose.yml
deleted file mode 100644
index 1197a5aa..00000000
--- a/integrationos-emit/tests/resource/docker-compose.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-services:
-  cache:
-    image: redis:7-alpine
-    ports:
-      - '6379:6379'
-    volumes:
-      - ./redis-data:/data
-  sc:
-    image: infinyon/fluvio:stable
-    container_name: sc
-    hostname: sc
-    ports:
-      - "9103:9003"
-    environment:
-      - RUST_LOG=debug
-    command: "./fluvio-run sc --local /fluvio/metadata"
-    volumes:
-      - ./fluvio-metadata:/fluvio/metadata
-  sc-setup:
-    build:
-      context: .
-      dockerfile: Dockerfile
-    container_name: sc-setup
-    environment:
-      - RUST_LOG=debug
-    entrypoint: >
-      /bin/sh -c "
-      fluvio profile add docker sc:9003 docker;
-      fluvio cluster spu register --id 5001 -p 0.0.0.0:9110 -l spu:9010 --private-server spu:9011;
-      exit 0;
-      "
-    depends_on:
-      - sc
-  spu:
-    image: infinyon/fluvio:stable
-    container_name: spu
-    hostname: spu
-    volumes:
-      - ./fluvio-data:/fluvio/data
-    environment:
-      - RUST_LOG=debug
-    ports:
-      - "9110:9010"
-      - "9111:9011"
-    command: "./fluvio-run spu -i 5001 -p spu:9010 -v spu:9011 --sc-addr sc:9004 --log-base-dir /fluvio/data"
-    depends_on:
-      - sc
diff --git a/integrationos-event/src/mongo_control_data_store.rs b/integrationos-event/src/mongo_control_data_store.rs
index ae952124..2f1d4b93 100644
--- a/integrationos-event/src/mongo_control_data_store.rs
+++ b/integrationos-event/src/mongo_control_data_store.rs
@@ -5,7 +5,6 @@ use crate::{
 use anyhow::{bail, Context as AnyhowContext, Result};
 use async_trait::async_trait;
 use bson::doc;
-
 use futures::future::join_all;
 use handlebars::Handlebars;
 use http::header::AUTHORIZATION;
@@ -279,7 +278,7 @@ impl ControlDataStore for MongoControlDataStore {
         for e in pipeline.middleware {
             if let Middleware::HttpExtractor(e) = e {
                 if e.key == extractor_key {
-                    return Ok(e);
+                    return Ok(*e);
                 }
             }
         }
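The only non-deletion change in the patch is the `Ok(e)` to `Ok(*e)` fix in the last hunk: after this patch the `HttpExtractor` variant evidently holds a `Box<HttpExtractor>`, and dereferencing the box moves the owned extractor out so the function can keep returning the unboxed type. A sketch with illustrative types:

    // Moving a value out of a Box held by an enum variant, as in the
    // `return Ok(*e)` change above. Types here are illustrative only.
    #[derive(Debug)]
    struct HttpExtractor {
        key: String,
    }

    enum Middleware {
        HttpExtractor(Box<HttpExtractor>),
    }

    fn find_extractor(items: Vec<Middleware>, key: &str) -> Option<HttpExtractor> {
        for m in items {
            if let Middleware::HttpExtractor(e) = m {
                if e.key == key {
                    return Some(*e); // *e moves the extractor out of its Box
                }
            }
        }
        None
    }

    fn main() {
        let items = vec![Middleware::HttpExtractor(Box::new(HttpExtractor {
            key: "k".into(),
        }))];
        println!("{:?}", find_extractor(items, "k"));
    }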