diff --git a/.dockerignore b/.dockerignore index 9a6f0b8e..eafe5ef4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,24 @@ +**/.DS_Store +**/.classpath +**/.dockerignore +**/.env +**/.git +**/.gitignore +**/.project +**/.settings +**/.vs +**/.vscode +**/charts +**/docker-compose* +**/compose.y*ml +**/Dockerfile* +/bin /target +LICENSE +README.md + +/demo /helm -.github/ -.cargo/ -.gitignore +.env.* +arc.* architecture.md -README.md -arc.png \ No newline at end of file diff --git a/.env.template b/.env.template new file mode 100644 index 00000000..18f26e80 --- /dev/null +++ b/.env.template @@ -0,0 +1,37 @@ +#==================================================== +# Application specific configuration +#==================================================== +RUST_LOG=info +APP_ENV=development + +APP_SERVER__HOST=0.0.0.0 +APP_SERVER__DOMAIN=statuslist.example.com +APP_SERVER__PORT=8000 + +# Certificate +APP_SERVER__CERT__EMAIL=support@example.com +APP_SERVER__CERT__ORGANIZATION=example.com +# (or https://acme-v02.api.letsencrypt.org/directory in production) +APP_SERVER__CERT__ACME_DIRECTORY_URL=https://pebble:14000/dir +# The last number is not explicitly defined in the spec +APP_SERVER__CERT__EKU=1,3,6,1,5,5,7,3,30 + +# Postgres connection string +APP_DATABASE__URL=postgres://postgres:postgres@db:5432/status-list + +# Redis configuration +APP_REDIS__REQUIRE_TLS=false +APP_REDIS__URI=redis://redis:6379 + +# AWS SDK +APP_AWS__REGION=us-east-1 +AWS_ENDPOINT_URL=http://localstack:4566 +AWS_ACCESS_KEY_ID=test +AWS_SECRET_ACCESS_KEY=test + +#==================================================== +# Postgres configuration +#==================================================== +POSTGRES_USER=postgres +POSTGRES_PASSWORD=postgres +POSTGRES_DB=status-list \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index f8fee696..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "yaml.schemas": { - "https://www.artillery.io/schema.json": [] - } -} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 5b1f1b5b..352c1f31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -114,12 +114,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "anyhow" -version = "1.0.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - [[package]] name = "arrayvec" version = "0.7.6" @@ -182,9 +176,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.6.2" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6fcc63c9860579e4cb396239570e979376e70aab79e496621748a09913f8b36" +checksum = "02a18fd934af6ae7ca52410d4548b98eb895aab0f1ea417d168d85db1434a141" dependencies = [ "aws-credential-types", "aws-runtime", @@ -271,9 +265,9 @@ dependencies = [ [[package]] name = "aws-sdk-secretsmanager" -version = "1.71.0" +version = "1.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9328a01c5c822fadf96be02187e0944eb6059067509ed32ad1af00e18cd5eb27" +checksum = "f9c0b663c9e8f67af8b264a94f81c0f7dfd1edad1484cdfca30944d1fb4429b6" dependencies = [ "aws-credential-types", "aws-runtime", @@ -287,16 +281,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-sso" -version = "1.67.0" +version = "1.71.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4863da26489d1e6da91d7e12b10c17e86c14f94c53f416bd10e0a9c34057ba" +checksum = "95a4fd09d6e863655d99cd2260f271c6d1030dc6bfad68e19e126d2e4c8ceb18" dependencies = [ "aws-credential-types", "aws-runtime", @@ -310,16 +303,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-ssooidc" -version = "1.68.0" +version = "1.72.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95caa3998d7237789b57b95a8e031f60537adab21fa84c91e35bef9455c652e4" +checksum = "3224ab02ebb3074467a33d57caf6fcb487ca36f3697fdd381b0428dc72380696" dependencies = [ "aws-credential-types", "aws-runtime", @@ -333,16 +325,15 @@ dependencies = [ "bytes", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-sts" -version = "1.68.0" +version = "1.72.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4939f6f449a37308a78c5a910fd91265479bd2bb11d186f0b8fc114d89ec828d" +checksum = "f6933f189ed1255e78175fbd73fb200c0aae7240d220ed3346f567b0ddca3083" dependencies = [ "aws-credential-types", "aws-runtime", @@ -357,16 +348,15 @@ dependencies = [ "aws-types", "fastrand", "http 0.2.12", - "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sigv4" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3503af839bd8751d0bdc5a46b9cac93a003a353e635b0c12cf2376b5b53e41ea" +checksum = "3734aecf9ff79aa401a6ca099d076535ab465ff76b46440cf567c8e70b65dc13" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -576,7 +566,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror", "tokio", ] @@ -661,12 +651,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "base16ct" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" - [[package]] name = "base64" version = "0.21.7" @@ -695,15 +679,6 @@ version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" -[[package]] -name = "base64url" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33de68096bac8e252e45589f42afd364c1dd28fbb3466ed726a941d5b9727d2c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "bigdecimal" version = "0.4.8" @@ -884,40 +859,11 @@ checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", "serde", - "wasm-bindgen", "windows-link", ] -[[package]] -name = "ciborium" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" -dependencies = [ - "ciborium-io", - "ciborium-ll", - "serde", -] - -[[package]] -name = "ciborium-io" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" - -[[package]] -name = "ciborium-ll" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" -dependencies = [ - "ciborium-io", - "half", -] - [[package]] name = 
"clang-sys" version = "1.8.1" @@ -1025,16 +971,6 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" -[[package]] -name = "coset" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8cc80f631f8307b887faca24dcc3abc427cd0367f6eb6188f6e8f5b7ad8fb" -dependencies = [ - "ciborium", - "ciborium-io", -] - [[package]] name = "cpufeatures" version = "0.2.17" @@ -1059,15 +995,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] - [[package]] name = "crossbeam-queue" version = "0.3.12" @@ -1083,24 +1010,6 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" -[[package]] -name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" - -[[package]] -name = "crypto-bigint" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" version = "0.1.6" @@ -1202,20 +1111,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" -[[package]] -name = "ecdsa" -version = "0.16.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" -dependencies = [ - "der", - "digest", - "elliptic-curve", - "rfc6979", - "signature", - "spki", -] - [[package]] name = "either" version = "1.15.0" @@ -1226,23 +1121,12 @@ dependencies = [ ] [[package]] -name = "elliptic-curve" -version = "0.13.8" +name = "encoding_rs" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ - "base16ct", - "crypto-bigint", - "digest", - "ff", - "generic-array", - "group", - "pem-rfc7468", - "pkcs8", - "rand_core 0.6.4", - "sec1", - "subtle", - "zeroize", + "cfg-if", ] [[package]] @@ -1289,26 +1173,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" -[[package]] -name = "ff" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - -[[package]] -name = "flate2" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "flume" version = "0.11.1" @@ -1462,7 +1326,6 
@@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", - "zeroize", ] [[package]] @@ -1472,10 +1335,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", ] [[package]] @@ -1502,17 +1363,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" -[[package]] -name = "group" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" -dependencies = [ - "ff", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "h2" version = "0.3.26" @@ -1551,16 +1401,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "half" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" -dependencies = [ - "cfg-if", - "crunchy", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -1774,6 +1614,7 @@ dependencies = [ "http 1.3.1", "hyper 1.6.0", "hyper-util", + "log", "rustls 0.23.27", "rustls-native-certs 0.8.1", "rustls-pki-types", @@ -1782,24 +1623,46 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", "hyper 1.6.0", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -1973,25 +1836,19 @@ dependencies = [ ] [[package]] -name = "instant-acme" -version = "0.7.2" +name = "ipnet" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37221e690dcc5d0ea7c1f70decda6ae3495e72e8af06bca15e982193ffdf4fc4" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" dependencies = [ - "async-trait", - "base64 0.22.1", - "bytes", - "http 1.3.1", - "http-body 1.0.1", - "http-body-util", - "hyper 1.6.0", - "hyper-rustls 0.27.5", - "hyper-util", - "ring", - "rustls-pki-types", + "memchr", "serde", - "serde_json", - "thiserror 1.0.69", ] [[package]] @@ -2035,21 +1892,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonwebtoken" -version = "9.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" 
-dependencies = [ - "base64 0.22.1", - "js-sys", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "lazy_static" version = "1.5.0" @@ -2073,12 +1915,12 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-targets 0.52.6", ] [[package]] @@ -2227,16 +2069,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nu-ansi-term" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" -dependencies = [ - "overload", - "winapi", -] - [[package]] name = "num-bigint" version = "0.4.6" @@ -2259,7 +2091,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand 0.8.5", + "rand", "smallvec", "zeroize", ] @@ -2398,24 +2230,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - -[[package]] -name = "p256" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" -dependencies = [ - "ecdsa", - "elliptic-curve", - "primeorder", - "sha2", -] - [[package]] name = "parking" version = "2.2.1" @@ -2445,16 +2259,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "pem" -version = "3.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" -dependencies = [ - "base64 0.22.1", - "serde", -] - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -2544,23 +2348,14 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "9dee91521343f4c5c6a63edd65e54f31f5c92fe8978c40a4282f8372194c6a7d" dependencies = [ "proc-macro2", "syn 2.0.101", ] -[[package]] -name = "primeorder" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" -dependencies = [ - "elliptic-curve", -] - [[package]] name = "proc-macro-crate" version = "3.3.0" @@ -2634,6 +2429,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.40" @@ -2662,18 +2463,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_chacha", + "rand_core", ] [[package]] @@ -2683,17 +2474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", + "rand_core", ] [[package]] @@ -2705,28 +2486,6 @@ dependencies = [ "getrandom 0.2.16", ] -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -dependencies = [ - "getrandom 0.3.3", -] - -[[package]] -name = "rcgen" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" -dependencies = [ - "pem", - "ring", - "rustls-pki-types", - "time", - "yasna", -] - [[package]] name = "redox_syscall" version = "0.5.12" @@ -2796,13 +2555,45 @@ dependencies = [ ] [[package]] -name = "rfc6979" -version = "0.4.0" +name = "reqwest" +version = "0.12.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" dependencies = [ - "hmac", - "subtle", + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "h2 0.4.10", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-rustls 0.27.5", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", ] [[package]] @@ -2861,7 +2652,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8", - "rand_core 0.6.4", + "rand_core", "signature", "spki", "subtle", @@ -2878,7 +2669,7 @@ dependencies = [ "borsh", "bytes", "num-traits", - "rand 0.8.5", + "rand", "rkyv", "serde", "serde_json", @@ -3029,6 +2820,18 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +[[package]] +name = "rusty-forkfork" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ce85af4dfa2fb0c0143121ab5e424c71ea693867357c9159b8777b59984c218" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.20" @@ -3095,7 +2898,7 @@ dependencies = [ "serde_json", "sqlx", "strum", - "thiserror 2.0.12", + "thiserror", "time", "tracing", "url", @@ -3193,7 +2996,7 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.101", - "thiserror 2.0.12", + "thiserror", ] [[package]] @@ -3226,17 +3029,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] -name = "sec1" -version = "0.7.3" +name = "sealed_test" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +checksum = "2a1867f8f005bd7fb73c367e2e45dd628417906a2ca27597fe59cbf04279a222" dependencies = [ - "base16ct", - "der", - "generic-array", - "pkcs8", - "subtle", - "zeroize", + "fs_extra", + "rusty-forkfork", + "sealed_test_derive", + "tempfile", +] + +[[package]] +name = "sealed_test_derive" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77253fb2d4451418d07025826028bcb96ee42d3e58859689a70ce62908009db6" +dependencies = [ + "quote", + "syn 2.0.101", ] [[package]] @@ -3418,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -3427,18 +3238,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" -[[package]] -name = "simple_asn1" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror 2.0.12", - "time", -] - [[package]] name = "slab" version = "0.4.9" @@ -3526,17 +3325,19 @@ dependencies = [ "once_cell", "percent-encoding", "rust_decimal", + "rustls 0.23.27", "serde", "serde_json", "sha2", "smallvec", - "thiserror 2.0.12", + "thiserror", "time", "tokio", "tokio-stream", "tracing", "url", "uuid", + "webpki-roots 0.26.11", ] [[package]] @@ -3609,7 +3410,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand 0.8.5", + "rand", "rsa", "rust_decimal", "serde", @@ -3618,7 +3419,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.12", + "thiserror", "time", "tracing", "uuid", @@ -3653,7 +3454,7 @@ dependencies = [ "memchr", "num-bigint", "once_cell", - "rand 0.8.5", + "rand", "rust_decimal", "serde", "serde_json", @@ -3661,7 +3462,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 2.0.12", + "thiserror", "time", "tracing", "uuid", @@ -3688,7 +3489,7 @@ dependencies = [ "serde", "serde_urlencoded", "sqlx-core", - "thiserror 2.0.12", + "thiserror", "time", "tracing", "url", @@ -3711,38 +3512,21 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" name = "status-list-server" version = "0.1.0" dependencies = [ - "anyhow", "async-trait", "aws-config", "aws-sdk-secretsmanager", "aws_secretsmanager_caching", "axum", - "base64url", - "chrono", - "coset", - "dotenvy", - "flate2", "http-body-util", "hyper 1.6.0", - "instant-acme", - "jsonwebtoken", - "once_cell", - "p256", - "pem", - "rand 0.9.1", - "rcgen", + "hyper-rustls 0.27.5", + "hyper-util", + "reqwest", "sea-orm", "sea-orm-migration", - "serde", - "serde_json", - "sha2", - "sqlx", - "thiserror 2.0.12", + "sealed_test", "tokio", - "tower", "tower-http", - "tracing", - "tracing-subscriber", "uuid", ] @@ -3802,6 +3586,9 @@ name = "sync_wrapper" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] [[package]] name = 
"synstructure" @@ -3814,6 +3601,27 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -3833,33 +3641,13 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] - [[package]] name = "thiserror" version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.12", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", + "thiserror-impl", ] [[package]] @@ -3968,6 +3756,16 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -4057,7 +3855,9 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", + "iri-string", "pin-project-lite", + "tower", "tower-layer", "tower-service", "tracing", @@ -4105,18 +3905,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", ] [[package]] @@ -4126,15 +3914,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", - "nu-ansi-term", "once_cell", "regex", "sharded-slab", - "smallvec", "thread_local", "tracing", "tracing-core", - "tracing-log", ] [[package]] @@ -4223,12 +4008,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "valuable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" - [[package]] name = "vcpkg" version = "0.2.15" @@ -4247,6 +4026,15 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "want" version = "0.3.1" @@ -4303,6 +4091,19 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.100" @@ -4335,6 +4136,34 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.0", +] + +[[package]] +name = "webpki-roots" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "which" version = "4.4.2" @@ -4357,28 +4186,6 @@ dependencies = [ "wasite", ] -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-core" version = "0.61.0" @@ -4420,6 +4227,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +[[package]] +name = "windows-registry" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad1da3e436dc7653dfdf3da67332e22bff09bb0e28b0239e1624499c7830842e" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + [[package]] name = "windows-result" version = "0.3.2" @@ -4489,29 +4307,13 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", + "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" -dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 
0.53.0", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -4524,12 +4326,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -4542,12 +4338,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -4560,24 +4350,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -4590,12 +4368,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -4608,12 +4380,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -4626,12 +4392,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -4644,12 +4404,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - [[package]] name = "winnow" version = "0.7.10" @@ -4695,15 +4449,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" -[[package]] -name = "yasna" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" -dependencies = [ - "time", -] - [[package]] name = "yoke" version = "0.8.0" diff --git a/Cargo.toml b/Cargo.toml index 50d418ef..d003a20e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,36 +4,23 @@ version = "0.1.0" edition = "2021" [dependencies] -chrono = "0.4" -anyhow = "1.0" +# Async runtime and utilities +tokio = { version = "1", features = ["full"] } async-trait = "0.1" -base64url = "0.1" -once_cell = "1.20" -serde_json = "1.0" -rand = "0.9" -sha2 = "0.10" -coset = "0.3" -jsonwebtoken = "9.3" + +# Web frameworks +hyper = "1.6" +hyper-util = "0.1" +reqwest = "0.12" http-body-util = "0.1" -tracing-subscriber = "0.3" +hyper-rustls = { version = "0.27", features = ["http2"] } axum = { version = "0.8", features = ["macros"] } -dotenvy = "0.15" -flate2 = "1.1.0" -hyper = "1.6" -rcgen = { version = "0.13", features = ["pem"] } -instant-acme = "0.7" -serde = { version = "1.0", features = ["derive"] } -sqlx = { version = "0.8", features = ["postgres", "runtime-tokio"] } -thiserror = "2.0" -tokio = { version = "1", features = ["full", "macros", "net", "rt-multi-thread"] } -tower = "0.5" tower-http = { version = "0.6", features = ["cors", "trace", "catch-panic"] } -tracing = "0.1.41" -p256 = { version = "0.13", features = ["pkcs8", "ecdsa", "alloc", "pem"] } -pem = "3.0.5" -sea-orm = { version = "1.1.7", features = [ + +# Database and ORM +sea-orm = { version = "1.1", features = [ "sqlx-postgres", - "runtime-tokio-native-tls", + "runtime-tokio-rustls", "macros", ] } aws-config = {version = "1.6", features = ["behavior-version-latest"] } @@ -42,8 +29,8 @@ aws_secretsmanager_caching = "1.2.1" uuid = { version = "1.17.0", features = ["v4"] } [dev-dependencies] -tokio = { version = "1", features = ["macros", "rt-multi-thread"] } -sea-orm = { version = "1.1.7", features = ["mock"] } +sealed_test = "1.1.0" +sea-orm = { version = "1.1", features = ["mock"] } [dependencies.sea-orm-migration] version = "1.1.7" diff --git a/Dockerfile b/Dockerfile index d7b668f2..e2ac0fe8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,51 +1,19 @@ -# Stage 1: Compile Rust binary with all dependencies -FROM rust:1.85-alpine AS builder +ARG APP_NAME=status-list-server -# Set working directory for build stage +FROM blackdex/rust-musl:x86_64-musl AS builder +ARG APP_NAME WORKDIR /app -# Install required build dependencies, including OpenSSL development files -RUN apk add --no-cache \ - build-base \ - pkgconf \ - openssl-dev \ - musl-dev \ - ca-certificates \ - postgresql-dev \ - musl-utils \ - llvm-libunwind-dev - -# Set environment variables (disable static linking for OpenSSL) -ENV OPENSSL_STATIC=0 -ENV OPENSSL_DIR=/usr - -# Copy dependency specifications first to optimize layer caching -COPY Cargo.toml Cargo.lock ./ - -# Create placeholder source to pre-download and cache dependencies -RUN mkdir -p src && echo "fn main() {}" > src/main.rs -RUN cargo build --release - -# Overwrite placeholder with actual source code -# Touch main.rs to ensure rebuild if source changed -COPY src ./src -RUN touch src/main.rs && cargo build --release - -# 
Build the project (using the host's default target) -RUN cargo build --release && \ - strip target/release/status-list-server - -# Debug: Check if binary exists in builder stage -RUN ls -l target/release/status-list-server && file target/release/status-list-server - -# Stage 2: Debug runtime image (temporary, for inspection) -FROM gcr.io/distroless/static-debian12:latest - -# Copy only the compiled binary from builder stage -COPY --from=builder /app/target/release/status-list-server /usr/local/bin/status-list-server - -# Expose default application port (TCP/8000) +RUN --mount=type=bind,source=src,target=src \ + --mount=type=bind,source=Cargo.toml,target=Cargo.toml \ + --mount=type=bind,source=Cargo.lock,target=Cargo.lock \ + --mount=type=cache,target=/app/target \ + --mount=type=cache,target=/root/.cargo/registry \ + cargo build --locked --release && \ + mv target/x86_64-unknown-linux-musl/release/${APP_NAME} . + +FROM gcr.io/distroless/static-debian12 AS runtime +ARG APP_NAME +COPY --from=builder --chown=nonroot:nonroot /app/${APP_NAME} /app/status-list EXPOSE 8000 - -# Container entrypoint -CMD ["/usr/local/bin/status-list-server"] \ No newline at end of file +ENTRYPOINT ["/app/status-list"] \ No newline at end of file diff --git a/README.md b/README.md index 103cc0ba..dcaaf085 100644 --- a/README.md +++ b/README.md @@ -2,50 +2,40 @@ The Status List Server is a web service that manages and publishes status lists, allowing issuers to update statuses and verifiers to retrieve them. It implements JWT-based authentication using ES256 (ECDSA with P-256 and SHA-256) for securing its endpoints. -## Prerequisites +## Getting Started -Before setting up the Status List Server, ensure you have the following installed: +Before running the server, ensure you have the following tools installed: -- [Rust](https://www.rust-lang.org/tools/install): The programming language used to develop the server. -- [Cargo](https://doc.rust-lang.org/cargo/getting-started/installation.html): The Rust package manager. +- [Rust & Cargo](https://www.rust-lang.org/tools/install) (Latest stable version recommended). - [PostgreSQL](https://www.postgresql.org/download/): The database system used for storing status lists. - -## Installation +- [Redis](https://redis.io/download): The in-memory data structure store used for caching. **Clone the Repository:** ```bash - git clone https://github.com/adorsys/status-list-server.git - cd status-list-server +git clone https://github.com/adorsys/status-list-server.git +cd status-list-server ``` -## Running with Docker Compose - -You can run the project directly using docker compose: - -- Execute the command below at the root of the project +### Configuration -```sh -docker-compose up -``` - -This command will pull and start postgres and also build the project image and start a container. +**Environment Variables:** -## Configuration + Create a `.env` file in the root directory. Take a look at the [.env.template](.env.template) file for an example of the required variables. -**Environment Variables:** +### Running with Docker Compose -> **TODO:** Document other required environment variables. 
+The simplest way to run the project is with [docker compose](https://docs.docker.com/compose/): -Create a `.env` file in the root directory with the following configurations: +- Execute the command below at the root of the project -```env -DATABASE_URL=postgres://username:password@localhost/status_list_db +```sh +docker compose up --build -d ``` -Replace `username` and `password` with your PostgreSQL credentials. +This command will pull all required images and start the server. -## Running the Server +### Running Manually To start the server, execute: @@ -53,7 +43,7 @@ To start the server, execute: cargo run ``` -By default, the server runs on `http://localhost:8000`. You can modify the port in the configuration settings. +By default, the server will listen on `http://localhost:8000`. You can modify the host and port in the configuration settings. ## API Endpoints @@ -69,6 +59,7 @@ By default, the server runs on `http://localhost:8000`. You can modify the port - **Endpoint**: `POST /credentials/` - **Description**: Allows issuers to register their public key and identifier for later authentication - **Request Body** + ```json { "issuer": "", @@ -76,6 +67,7 @@ By default, the server runs on `http://localhost:8000`. You can modify the port "alg": "ES256" } ``` + - `issuer`: Unique identifier for the issuer - `public_key`: PEM-encoded public key - `alg`: "ES256" (ECDSA with P-256 and SHA-256) @@ -86,6 +78,7 @@ By default, the server runs on `http://localhost:8000`. You can modify the port - **Description**: Allows an issuer to publish their token status list - **Authorization**: Requires a valid signed JWT token with the corresponding registered private key with issuer's ID as the `kid` (Key ID) in the header - **Request Body** + ```json { "list_id": "30202cc6-1e3f-4479-a567-74e86ad73693", @@ -95,6 +88,7 @@ By default, the server runs on `http://localhost:8000`. You can modify the port ] } ``` + - `index`: Position in the status list - `status`: Status value (VALID, INVALID, SUSPENDED, APPLICATIONSPECIFIC) @@ -180,9 +174,11 @@ The server uses JWT-based authentication with the following requirements: 1. Issuers must first register their public key using the `/credentials/` endpoint 2. All authenticated requests must include a JWT token in the Authorization header: - ``` + + ```http Authorization: Bearer ``` + 3. The JWT token must: - Be signed with the algorithm specified during issuer registration. - Include the issuer's ID as the `kid` (Key ID) in the header @@ -198,6 +194,16 @@ Example JWT header: } ``` +## Certificate Provisioning and Renewal + +The Status List Server is provisioned with a cryptographic certificate that is embedded into all issued status list tokens. This certificate ensures the authenticity and integrity of the tokens distributed by the server. + +**Automatic Issuance and Renewal:** + +- Certificate issuance and renewal are managed according to the configured renewal strategy. +- Every day at midnight, a cron job checks whether the certificate should be renewed based on this strategy. +- If the certificate is still considered valid according to the configured strategy, no renewal occurs; renewal is only triggered when necessary. 
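+
+For illustration only, a minimal sketch of the kind of check such a renewal job might perform is shown below. This is not the server's actual implementation: the threshold-based strategy and the `should_renew` helper are assumptions made for the example, since the concrete renewal strategy is configurable and not spelled out here.
+
+```rust
+use std::time::{Duration, SystemTime};
+
+/// Hypothetical helper: renew once the remaining validity drops below a
+/// configured window (for example 30 days). The real strategy may differ.
+fn should_renew(expires_at: SystemTime, renewal_window: Duration) -> bool {
+    match expires_at.duration_since(SystemTime::now()) {
+        // Enough validity left only if the remaining time exceeds the window.
+        Ok(remaining) => remaining < renewal_window,
+        // The expiry time is already in the past: renew immediately.
+        Err(_) => true,
+    }
+}
+```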
+ ## Error Handling The server implements proper error handling and returns appropriate HTTP status codes: diff --git a/docker-compose.yml b/docker-compose.yml index 6a5de366..e1a4eb0f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,9 +1,14 @@ services: db: - image: postgres:17.5 - container_name: status-list-server-db - env_file: - - .env + image: postgres:17-alpine + restart: always + container_name: status-list-db + ports: + - 5432:5432 + environment: + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-status-list} volumes: - pgdata:/var/lib/postgresql/data healthcheck: @@ -14,19 +19,68 @@ services: networks: - status-list-network + redis: + image: redis:8.0-alpine + container_name: redis + ports: + - 6379:6379 + networks: + - status-list-network + + localstack: + container_name: localstack + image: localstack/localstack + ports: + - 4566:4566 + environment: + - SERVICES=s3,secretsmanager + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:4566/"] + interval: 5s + timeout: 5s + retries: 5 + networks: + - status-list-network + + challtestsrv: + image: ghcr.io/letsencrypt/pebble-challtestsrv:latest + command: -http01 "" -https01 "" -tlsalpn01 "" + ports: + - 8055:8055 + networks: + - status-list-network + + pebble: + image: ghcr.io/letsencrypt/pebble:latest + command: -config /test/pebble_config.json -strict -dnsserver challtestsrv:8053 + ports: + - 14000:14000 + - 15000:15000 + depends_on: + challtestsrv: + condition: service_started + networks: + - status-list-network + volumes: + - ./src/test_resources/pebble_config.json:/test/pebble_config.json + app: build: context: . dockerfile: Dockerfile container_name: status-list-server env_file: .env - volumes: - - ~/.aws/credentials:/root/.aws/credentials + ports: + - 8000:8000 depends_on: db: condition: service_healthy - ports: - - "8000:8000" + redis: + condition: service_started + pebble: + condition: service_started + localstack: + condition: service_healthy networks: - status-list-network @@ -36,4 +90,4 @@ networks: volumes: pgdata: - driver: local \ No newline at end of file + driver: local diff --git a/src/auth/authentication.rs b/src/auth/authentication.rs index 20130518..11a7b115 100644 --- a/src/auth/authentication.rs +++ b/src/auth/authentication.rs @@ -1,7 +1,7 @@ use jsonwebtoken::{decode, decode_header, Algorithm, DecodingKey, Validation}; use serde_json::Value; -use crate::{database::error::RepositoryError, model::Credentials, utils::state::AppState}; +use crate::{database::error::RepositoryError, models::Credentials, utils::state::AppState}; use super::errors::AuthenticationError; @@ -9,7 +9,7 @@ pub async fn publish_credentials( credentials: Credentials, state: AppState, ) -> Result<(), RepositoryError> { - let store = &state.credential_repository; + let store = &state.credential_repo; // Check for existing issuer if store @@ -26,7 +26,7 @@ pub async fn publish_credentials( } pub async fn verify_token(state: &AppState, token: &str) -> Result<(), AuthenticationError> { - let store = &state.credential_repository; + let store = &state.credential_repo; let header = decode_header(token).map_err(AuthenticationError::JwtError)?; diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 00000000..311a7bbd --- /dev/null +++ b/src/config.rs @@ -0,0 +1,194 @@ +use config::{Config as ConfigLib, ConfigError, Environment}; +use redis::{ + aio::ConnectionManager, Client as RedisClient, ClientTlsConfig, RedisResult, TlsCertificates, 
+}; +use secrecy::{ExposeSecret, SecretString}; +use serde::Deserialize; +use serde_aux::field_attributes::deserialize_vec_from_string_or_vec; + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub server: ServerConfig, + pub database: DatabaseConfig, + pub redis: RedisConfig, + pub aws: AwsConfig, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ServerConfig { + pub host: String, + pub domain: String, + pub port: u16, + pub cert: CertConfig, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct CertConfig { + pub email: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub organization: Option, + #[serde(deserialize_with = "deserialize_vec_from_string_or_vec")] + #[serde(default)] + pub eku: Vec, + pub acme_directory_url: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct RedisConfig { + pub uri: SecretString, + pub require_tls: bool, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct DatabaseConfig { + pub url: SecretString, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct AwsConfig { + pub region: String, +} + +impl RedisConfig { + /// Establishes a new Redis connection based on the configuration. + /// + /// If [`RedisConfig::require_tls`] is `false`, a plain-text connection is used. + /// If it is `true`, the connection will use TLS, and the URI **must** use the `rediss://` scheme. + /// + /// To enable mutual TLS (mTLS), both `cert_pem` and `key_pem` must be provided. + /// If one is missing, the client-side authentication will not be effective. + /// + /// The optional `root_cert` parameter allows specifying a custom root certificate (in PEM format). + /// If omitted, system root certificates will be used. + /// + /// # Parameters + /// - `cert_pem`: The client certificate in PEM format (required for mTLS). + /// - `key_pem`: The client private key in PEM format (required for mTLS). + /// - `root_cert`: An optional custom root certificate in PEM format. + /// + /// # Errors + /// Returns an error if the connection cannot be established. + pub async fn start( + &self, + cert_pem: Option<&str>, + key_pem: Option<&str>, + root_cert: Option<&str>, + ) -> RedisResult { + let client = if !self.require_tls { + RedisClient::open(self.uri.expose_secret())? + } else { + let client_tls = match (cert_pem, key_pem) { + (Some(cert), Some(key)) => Some(ClientTlsConfig { + client_cert: cert.as_bytes().to_vec(), + client_key: key.as_bytes().to_vec(), + }), + _ => None, + }; + let root_cert = root_cert.map(|cert| cert.as_bytes().to_vec()); + + RedisClient::build_with_tls( + self.uri.expose_secret(), + TlsCertificates { + client_tls, + root_cert, + }, + )? + }; + + client.get_connection_manager().await + } +} + +impl Config { + pub fn load() -> Result { + // Build the config + let config = ConfigLib::builder() + // Set default values + .set_default("server.host", "localhost")? + .set_default("server.domain", "localhost")? + .set_default("server.port", 8000)? + .set_default( + "database.url", + "postgres://postgres:postgres@localhost:5432/status-list", + )? + .set_default("redis.uri", "redis://localhost:6379")? + .set_default("redis.require_tls", false)? + .set_default("server.cert.email", "admin@example.com")? + .set_default("server.cert.eku", vec![1, 3, 6, 1, 5, 5, 7, 3, 30])? + .set_default("server.cert.organization", "adorsys GmbH & CO KG")? + .set_default( + "server.cert.acme_directory_url", + "https://acme-v02.api.letsencrypt.org/directory", + )? + .set_default("aws.region", "us-east-1")? 
+ // Override config values via environment variables + // The environment variables should be prefixed with 'APP_' and use '__' as a separator + // Example: APP_REDIS__REQUIRE_TLS=true + .add_source( + Environment::with_prefix("APP") + .prefix_separator("_") + .separator("__"), + ) + .build()?; + + config.try_deserialize() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sealed_test::prelude::*; + use secrecy::ExposeSecret; + + #[sealed_test] + fn test_default_config() { + let config = Config::load().expect("Failed to load config"); + + assert_eq!(config.server.host, "localhost"); + assert_eq!(config.server.port, 8000); + assert_eq!( + config.database.url.expose_secret(), + "postgres://postgres:postgres@localhost:5432/status-list" + ); + assert_eq!(config.redis.uri.expose_secret(), "redis://localhost:6379"); + assert!(!config.redis.require_tls); + assert_eq!(config.server.cert.email, "admin@example.com"); + assert_eq!( + config.server.cert.acme_directory_url, + "https://acme-v02.api.letsencrypt.org/directory" + ); + assert_eq!(config.aws.region, "us-east-1"); + } + + #[sealed_test(env = [ + ("APP_SERVER__HOST", "0.0.0.0"), + ("APP_SERVER__PORT", "5002"), + ("APP_DATABASE__URL", "postgres://user:password@localhost:5432/status-list"), + ("APP_REDIS__URI", "rediss://user:password@localhost:6379/redis"), + ("APP_REDIS__REQUIRE_TLS", "true"), + ("APP_SERVER__CERT__EMAIL", "test@gmail.com"), + ("APP_SERVER__CERT__ACME_DIRECTORY_URL", "https://acme-v02.api.letsencrypt.org/directory"), + ])] + fn test_env_config() { + // Test configuration overrides via environment variables + let config = Config::load().expect("Failed to load config"); + + assert_eq!(config.server.host, "0.0.0.0"); + assert_eq!(config.server.port, 5002); + assert_eq!( + config.database.url.expose_secret(), + "postgres://user:password@localhost:5432/status-list" + ); + assert_eq!( + config.redis.uri.expose_secret(), + "rediss://user:password@localhost:6379/redis" + ); + assert!(config.redis.require_tls); + assert_eq!(config.server.cert.email, "test@gmail.com"); + assert_eq!( + config.server.cert.acme_directory_url, + "https://acme-v02.api.letsencrypt.org/directory" + ); + } +} diff --git a/src/database/mod.rs index 08d00ca7..0b16edac 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -74,9 +74,10 @@ pub mod migrations { .col(ColumnDef::new(StatusListTokens::Sub).string().not_null()) .col(ColumnDef::new(StatusListTokens::Ttl).big_integer()) .foreign_key( + // Foreign key used to ensure that the Issuer in the StatusListTokens table references a valid Issuer in the Credentials table ForeignKey::create() - .name("fk_sub") // Foreign key name for the sub->issuer relationship - .from(StatusListTokens::Table, StatusListTokens::Sub) + .name("fk_issuer") + .from(StatusListTokens::Table, StatusListTokens::Issuer) .to(Credentials::Table, Credentials::Issuer), ) .to_owned(), diff --git a/src/database/queries.rs index 02d00dd1..01830b8a 100644 --- a/src/database/queries.rs +++ b/src/database/queries.rs @@ -2,8 +2,9 @@ use sea_orm::{ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, Qu use std::sync::Arc; use super::error::RepositoryError; -use crate::model::{credentials, status_list_tokens, Credentials, StatusListToken}; +use crate::models::{credentials, status_list_tokens, Credentials, StatusListToken}; +#[derive(Clone)] pub struct SeaOrmStore { db: Arc, _phantom: std::marker::PhantomData, } @@ -46,18 +47,6 @@ impl SeaOrmStore { .map_err(|e| 
RepositoryError::FindError(e.to_string())) } - pub async fn find_all_by( - &self, - issuer: String, - ) -> Result, RepositoryError> { - status_list_tokens::Entity::find() - .filter(status_list_tokens::Column::ListId.eq(issuer)) - .all(&*self.db) - .await - .map(|tokens| tokens.into_iter().collect()) - .map_err(|e| RepositoryError::FindError(e.to_string())) - } - pub async fn update_one( &self, list_id: String, @@ -94,7 +83,7 @@ impl SeaOrmStore { Ok(result.rows_affected > 0) } - pub async fn find_by_issuer( + pub async fn find_all_by_issuer( &self, issuer: &str, ) -> Result, RepositoryError> { @@ -155,7 +144,7 @@ impl SeaOrmStore { #[cfg(test)] mod test { - use crate::model; + use crate::models; use super::*; use jsonwebtoken::Algorithm; @@ -182,22 +171,22 @@ mod test { vec![credentials::Model { issuer: entity.issuer.clone(), public_key: entity.public_key.clone(), - alg: model::Alg(entity.alg), + alg: models::Alg(entity.alg), }], // Insert return vec![credentials::Model { issuer: entity.issuer.clone(), public_key: entity.public_key.clone(), - alg: model::Alg(entity.alg), + alg: models::Alg(entity.alg), }], // Find after insert vec![credentials::Model { issuer: entity.issuer.clone(), public_key: entity.public_key.clone(), - alg: model::Alg(entity.alg), + alg: models::Alg(entity.alg), }], // Find before update vec![credentials::Model { issuer: updated_entity.issuer.clone(), public_key: updated_entity.public_key.clone(), - alg: model::Alg(updated_entity.alg), + alg: models::Alg(updated_entity.alg), }], // Update return ]) .append_exec_results(vec![ diff --git a/src/lib.rs b/src/lib.rs index 1aeedcc0..2e2958c9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,14 @@ -pub mod auth; mod database; -pub mod model; +#[cfg(test)] mod test_resources; +#[cfg(test)] mod test_utils; -pub mod utils; +mod utils; + +pub mod auth; +pub mod config; +pub mod models; +pub mod startup; pub mod web; + +pub use utils::{cert_manager, state}; diff --git a/src/main.rs b/src/main.rs index b82495df..e786c665 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,4 @@ -use axum::{ - http::Method, - response::IntoResponse, - routing::{get, patch, post}, - Json, Router, -}; +use color_eyre::{eyre::eyre, Result}; use dotenvy::dotenv; use serde::Serialize; use status_list_server::web::handlers::{credential_handler, get_status_list}; @@ -37,8 +32,7 @@ async fn health_check() -> impl IntoResponse { } #[tokio::main] -async fn main() { - dotenv().ok(); +async fn main() -> Result<()> { config_tracing(); let state = setup().await; diff --git a/src/model.rs b/src/models.rs similarity index 100% rename from src/model.rs rename to src/models.rs diff --git a/src/startup.rs b/src/startup.rs new file mode 100644 index 00000000..6fc5a767 --- /dev/null +++ b/src/startup.rs @@ -0,0 +1,74 @@ +use axum::{ + response::IntoResponse, + routing::{get, patch, post}, + Router, +}; +use color_eyre::eyre::Context; +use hyper::Method; +use tokio::net::TcpListener; +use tower_http::{ + catch_panic::CatchPanicLayer, + cors::{Any, CorsLayer}, + trace::TraceLayer, +}; + +use crate::{ + config::Config, + utils::state::AppState, + web::handlers::{ + credential_handler, get_status_list, publish_token_status, update_token_status, + }, +}; + +async fn welcome() -> impl IntoResponse { + "Status list Server" +} + +async fn health_check() -> impl IntoResponse { + "OK" +} + +pub struct HttpServer { + listener: TcpListener, + router: Router, +} + +impl HttpServer { + pub async fn new(config: &Config, state: AppState) -> color_eyre::Result { + let cors = 
CorsLayer::new() + .allow_methods([Method::GET, Method::POST, Method::OPTIONS]) + .allow_origin(Any) + .allow_headers(Any); + + let router = Router::new() + .route("/", get(welcome)) + .route("/health", get(health_check)) + .route("/credentials", post(credential_handler)) + .nest("/statuslists", status_list_routes()) + .layer(TraceLayer::new_for_http()) + .layer(CatchPanicLayer::new()) + .layer(cors) + .with_state(state); + + let listener = TcpListener::bind(format!("{}:{}", config.server.host, config.server.port)) + .await + .wrap_err_with(|| format!("Failed to bind to port {}", config.server.port))?; + + Ok(Self { router, listener }) + } + + pub async fn run(self) -> color_eyre::Result<()> { + tracing::info!("listening on {}", self.listener.local_addr()?); + axum::serve(self.listener, self.router) + .await + .wrap_err("Failed to start HTTP server")?; + Ok(()) + } +} + +fn status_list_routes() -> Router { + Router::new() + .route("/{list_id}", get(get_status_list)) + .route("/publish", post(publish_token_status)) + .route("/update", patch(update_token_status)) +} diff --git a/src/test_resources/cert_data.json b/src/test_resources/cert_data.json new file mode 100644 index 00000000..9a4e9a20 --- /dev/null +++ b/src/test_resources/cert_data.json @@ -0,0 +1 @@ +{"certificate":"-----BEGIN CERTIFICATE-----\nMIICjzCCAXegAwIBAgIIap1kd+VMkj4wDQYJKoZIhvcNAQELBQAwKDEmMCQGA1UE\nAxMdUGViYmxlIEludGVybWVkaWF0ZSBDQSAyZWFkMmEwHhcNMjUwNjAzMTM0NjI4\nWhcNMjUwOTAxMTM0NjI3WjAAMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdfK+\n+GRIb+B+z64oqI5mpJ3ArNbsjNBgByMdw+tQTK+bVX2iCGq+bF8QBm65JQFnR8ow\nfwY5Z+RkaXt7aYzzFKOBrzCBrDAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI\nKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBSW1f2bv3iqsynBxtZ5\nAm/Ncr98VjBWBgNVHREBAf8ETDBKggtleGFtcGxlLmNvbYIQdGVzdC5leGFtcGxl\nLmNvbYIXc3RhdHVzLWxpc3QuZXhhbXBsZS5jb22CEGFjbWUuZXhhbXBsZS5jb20w\nDQYJKoZIhvcNAQELBQADggEBAC0PYLeO8/BrcMtcCeaWX8Zev/x8m2LwLegrO+Br\nJB1UP93MmvUsYU4nIO8jhZFWsHoOh7ObtyH2Jxlq+z3zOLJxWXUkASeyDq1cVGNm\nHMITgrqVUzgXNxZTMa7JEAV7kU+qUnVx5Qa6Euh2VOzWK7ErZNfDTgEMe4aghM6v\n5DLctrG2cGuHOppb+NKqrJaIBwLu+OO2F0krY2UOWPgqL4djXkdK+uT2tOwd/UNZ\nLwfXnfIVczOzTmE4UN84lXHVRfP764CDnV4gwSTVbMXUUK3u2AbJ1eLEkImRLy8f\n63IWWD/4rgUgHo2iTpwBeUNvB68c3d+3SfyTtIKNqbWDs3o=\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDRDCCAiygAwIBAgIIUipe7fy83DgwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE\nAxMVUGViYmxlIFJvb3QgQ0EgNGRkNTQzMCAXDTI1MDYwMzA4MDkzM1oYDzIwNTUw\nNjAzMDgwOTMzWjAoMSYwJAYDVQQDEx1QZWJibGUgSW50ZXJtZWRpYXRlIENBIDJl\nYWQyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL0xT0mSqavGsbom\nQfGfIwL0Qts7xVQEXF7QU4JYRPbkdZGuaItWnqG3ApEpAGGrKSCSU1aAP7UZzw7M\nsGxeS/XY47USR0Juj9uS7qc1nPytLwmG88zJjfp/m1n+rkC5HwfyZsFTt6R7ATkl\n/L+j40yLxlMusljXhYhzxQo/a29o6dnqlYJLXBcBZL/m4Q8IUqRCuEvbhnLaEju4\nMCm3SpEjMrWDJmygeVMf/zW/PoIcy+9RYZ9R7xuox4piGXh0p3UReILN2TZ1hTY6\nxkS60rsFPxyc7EMbV3NFbQuFluSmfWiVMSWH4S2Kh80xhtYlzzr9oftDxsQxahKV\n31XZK4sCAwEAAaN4MHYwDgYDVR0PAQH/BAQDAgKEMBMGA1UdJQQMMAoGCCsGAQUF\nBwMBMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJbV/Zu/eKqzKcHG1nkCb81y\nv3xWMB8GA1UdIwQYMBaAFHeA+onjJEo7JadDXMeGNUpwg++VMA0GCSqGSIb3DQEB\nCwUAA4IBAQCkcg6Zuw1sbeGOjDCBePuqGx8+s05QODuRQbK6G4SbO7DwxNrf3Lyu\nAIXUFdqp1fEXnwRVDLAjTVy15VpTcjHnGJeUGkPV1CVUddiAYokH5hqSguqCZI7z\n57v91CF2kdGw8/Z4o4oJ33iemfYOntShuq4WOp9SYrP6TSjZm2y3aDfcBEbpRRRc\n3Lz+E259zWVAaELLh9ufQhBIpjK6g2g7LLGAyrwStDw24ivzRkXp8/cxB8j8llAg\nrs2FlRWYCjdm2Z3aKek91FV+FiJiwNQ6dpEpM28E4T5j2BPcEAnvpCvpt7MlU7ie\nKhZqO9JY2T/9LhIhsLiji2Ikcp/NPNzh\n-----END CERTIFICATE-----\n","valid_from":1748958388,"expires_at":1756734387,"updated_at":1748958389} \ No 
newline at end of file diff --git a/src/test_resources/helper.rs b/src/test_resources/helper.rs index cb89b159..587b1af3 100644 --- a/src/test_resources/helper.rs +++ b/src/test_resources/helper.rs @@ -1,6 +1,6 @@ #[cfg(test)] -use crate::model::StatusEntry; -use crate::model::StatusRequest; +use crate::models::StatusEntry; +use crate::models::StatusRequest; // Helper to create a test request payload with customizable bits #[cfg(test)] diff --git a/src/test_resources/pebble.pem b/src/test_resources/pebble.pem new file mode 100644 index 00000000..35388ee5 --- /dev/null +++ b/src/test_resources/pebble.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIIJOLbes8sTr4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMjRlMmRiMCAXDTE3MTIwNjE5NDIxMFoYDzIxMTcx +MjA2MTk0MjEwWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAyNGUyZGIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5WgZNoVJandj43kkLyU50vzCZ +alozvdRo3OFiKoDtmqKPNWRNO2hC9AUNxTDJco51Yc42u/WV3fPbbhSznTiOOVtn +Ajm6iq4I5nZYltGGZetGDOQWr78y2gWY+SG078MuOO2hyDIiKtVc3xiXYA+8Hluu +9F8KbqSS1h55yxZ9b87eKR+B0zu2ahzBCIHKmKWgc6N13l7aDxxY3D6uq8gtJRU0 +toumyLbdzGcupVvjbjDP11nl07RESDWBLG1/g3ktJvqIa4BWgU2HMh4rND6y8OD3 +Hy3H8MY6CElL+MOCbFJjWqhtOxeFyZZV9q3kYnk9CAuQJKMEGuN4GU6tzhW1AgMB +AAGjRTBDMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB +BQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEAF85v +d40HK1ouDAtWeO1PbnWfGEmC5Xa478s9ddOd9Clvp2McYzNlAFfM7kdcj6xeiNhF +WPIfaGAi/QdURSL/6C1KsVDqlFBlTs9zYfh2g0UXGvJtj1maeih7zxFLvet+fqll +xseM4P9EVJaQxwuK/F78YBt0tCNfivC6JNZMgxKF59h0FBpH70ytUSHXdz7FKwix +Mfn3qEb9BXSk0Q3prNV5sOV3vgjEtB4THfDxSz9z3+DepVnW3vbbqwEbkXdk3j82 +2muVldgOUgTwK8eT+XdofVdntzU/kzygSAtAQwLJfn51fS1GvEcYGBc1bDryIqmF +p9BI7gVKtWSZYegicA== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/src/test_resources/pebble_config.json b/src/test_resources/pebble_config.json new file mode 100644 index 00000000..4f83b693 --- /dev/null +++ b/src/test_resources/pebble_config.json @@ -0,0 +1,27 @@ +{ + "pebble": { + "listenAddress": "0.0.0.0:14000", + "managementListenAddress": "0.0.0.0:15000", + "certificate": "test/certs/localhost/cert.pem", + "privateKey": "test/certs/localhost/key.pem", + "httpPort": 5002, + "tlsPort": 5001, + "ocspResponderURL": "", + "externalAccountBindingRequired": false, + "domainBlocklist": ["blocked-domain.example"], + "retryAfter": { + "authz": 3, + "order": 5 + }, + "profiles": { + "default": { + "description": "The profile you know and love", + "validityPeriod": 7776000 + }, + "shortlived": { + "description": "A short-lived cert profile, without actual enforcement", + "validityPeriod": 518400 + } + } + } +} \ No newline at end of file diff --git a/src/test_utils.rs b/src/test_utils.rs new file mode 100644 index 00000000..ae5d46fe --- /dev/null +++ b/src/test_utils.rs @@ -0,0 +1,70 @@ +use crate::{ + cert_manager::storage::StorageError, + utils::{ + cert_manager::{storage::Storage, CertManager}, + state::AppState, + }, +}; +use async_trait::async_trait; +use sea_orm::{DbBackend, MockDatabase}; +use std::{collections::HashMap, sync::Arc}; + +pub struct MockStorage { + pub key_value: HashMap, +} + +#[async_trait] +impl Storage for MockStorage { + async fn store(&self, _key: &str, _value: &str) -> Result<(), StorageError> { + Ok(()) + } + + async fn load(&self, key: &str) -> Result, StorageError> { + if let Some(value) = self.key_value.get(key) { + Ok(Some(value.clone())) + } else { + Ok(None) + } + } + + async fn delete(&self, _key: &str) -> Result<(), StorageError> { + Ok(()) + } +} + +pub 
async fn test_app_state(db_conn: Option>) -> AppState { + use crate::database::queries::SeaOrmStore; + + // Install the crypto provider for the tests + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + + let db = db_conn + .unwrap_or_else(|| Arc::new(MockDatabase::new(DbBackend::Postgres).into_connection())); + + let key_pem = include_str!("test_resources/ec-private.pem").to_string(); + let secrets_storage = MockStorage { + key_value: HashMap::from([("keys/test.com".to_string(), key_pem)]), + }; + + let cert_data = include_str!("test_resources/cert_data.json").to_string(); + let cert_storage = MockStorage { + key_value: HashMap::from([("certs/test.com/cert_data.json".to_string(), cert_data)]), + }; + + let certificate_manager = CertManager::new( + ["test.com"], + "test@example.com", + None::, + "http://example.com/dir", + ) + .unwrap() + .with_cert_storage(cert_storage) + .with_secrets_storage(secrets_storage); + + AppState { + credential_repo: SeaOrmStore::new(db.clone()), + status_list_token_repo: SeaOrmStore::new(db), + server_domain: "example.com".to_string(), + cert_manager: Arc::new(certificate_manager), + } +} diff --git a/src/test_utils/mod.rs b/src/test_utils/mod.rs deleted file mode 100644 index 7c8dd6bb..00000000 --- a/src/test_utils/mod.rs +++ /dev/null @@ -1,46 +0,0 @@ -#[cfg(test)] -pub mod test { - use crate::utils::errors::SecretCacheError; - use crate::utils::state::{AppState, CacheConfig, SecretCache, SecretManager}; - use async_trait::async_trait; - use aws_config::{BehaviorVersion, SdkConfig}; - use aws_sdk_secretsmanager::Client as SecretsManagerClient; - use std::sync::Arc; - - // Mock implementation for testing - pub struct MockSecretCache { - pub value: Option, - } - - #[async_trait] - impl SecretCache for MockSecretCache { - async fn get_secret_string( - &self, - _secret_id: String, - ) -> Result, SecretCacheError> { - Ok(self.value.clone()) - } - } - - pub fn test_app_state(db_conn: Arc) -> AppState { - use crate::database::queries::SeaOrmStore; - - let pem = include_str!("../test_resources/ec-private.pem").to_string(); - let config = SdkConfig::builder() - .behavior_version(BehaviorVersion::latest()) - .build(); - let secret_manager = SecretManager::new( - MockSecretCache { value: Some(pem) }, - SecretsManagerClient::new(&config), - "test-server-key".to_string(), - CacheConfig::default(), - ); - - AppState { - credential_repository: Arc::new(SeaOrmStore::new(db_conn.clone())), - status_list_token_repository: Arc::new(SeaOrmStore::new(db_conn)), - server_public_domain: "example.com".to_string(), - secret_manager: Arc::new(secret_manager), - } - } -} diff --git a/src/utils/ca_gen.rs b/src/utils/ca_gen.rs deleted file mode 100644 index 57925bbf..00000000 --- a/src/utils/ca_gen.rs +++ /dev/null @@ -1,146 +0,0 @@ -use instant_acme::{ - Account, ChallengeType, Identifier, LetsEncrypt, NewAccount, NewOrder, OrderStatus, -}; -use rcgen::{ - CertificateParams, DistinguishedName, DnType, ExtendedKeyUsagePurpose, KeyUsagePurpose, -}; -use std::{fs, path::Path, time::Duration}; - -const WELL_KNOWN_PATH: &str = ".well-known/acme-challenge"; - -/// The data needed to generate the CA certificate -pub struct CertOptions { - /// The server domain names - pub domains: Vec, - /// The company email - pub email: String, - /// The web root directory path - pub web_root: String, - /// The company name - pub company_name: String, - /// The account credentials file path - pub acc_cred_path: String, - /// The key usage extensions code - pub eku: Vec, -} - -// 
Generate a certificate signing request with EKU for Status List Token signing -fn generate_csr( - domains: &[String], - company_name: &str, - eku: &[u64], - server_key_pem: &str, -) -> Result<(String, String), anyhow::Error> { - if domains.is_empty() { - return Err(anyhow::anyhow!("No domain(s) provided")); - } - // Build certificate request parameters - let mut params = CertificateParams::new(domains)?; - let mut dn = DistinguishedName::new(); - dn.push(DnType::CommonName, domains.first().unwrap()); - dn.push(DnType::OrganizationName, company_name); - params.distinguished_name = dn; - // Add Extended Key Usage for Status List Token signing - // https://www.ietf.org/archive/id/draft-ietf-oauth-status-list-10.html#section-10.1 - params.extended_key_usages = vec![ExtendedKeyUsagePurpose::Other(eku.to_vec())]; - params.key_usages = vec![KeyUsagePurpose::DigitalSignature]; - - // Generate certificate signing request - let keypair = rcgen::KeyPair::from_pem(server_key_pem)?; - let csr = params.serialize_request(&keypair)?; - Ok((csr.pem()?, keypair.serialize_pem())) -} - -/// Request a certificate from the certificate authority -/// -/// Returns a tuple containing the server certificate and private key -pub async fn req_certificate( - cert_options: &CertOptions, - server_key_pem: &str, -) -> Result<(String, String), anyhow::Error> { - let web_root = if !cert_options.web_root.ends_with('/') { - format!("{}/", cert_options.web_root) - } else { - cert_options.web_root.clone() - }; - // try to load account credentials or create a new one if it doesn't exist - let credentials = if let Ok(file) = fs::read_to_string(&cert_options.acc_cred_path) { - serde_json::from_str(&file)? - } else { - let (_, credentials) = Account::create( - &NewAccount { - contact: &[&format!("mailto:{}", cert_options.email)], - terms_of_service_agreed: true, - only_return_existing: false, - }, - // TODO : Replace staging with production - // We use staging to avoid rate limits - LetsEncrypt::Staging.url(), - None, - ) - .await?; - fs::write( - &cert_options.acc_cred_path, - serde_json::to_string_pretty(&credentials)?, - )?; - credentials - }; - let account = Account::from_credentials(credentials).await?; - - // Create the ACME order based on the given domain names. - let identifiers = cert_options - .domains - .iter() - .map(|ident| Identifier::Dns(ident.clone())) - .collect::>(); - let mut order = account - .new_order(&NewOrder { - identifiers: &identifiers, - }) - .await?; - - let authorizations = order.authorizations().await?; - for auth in authorizations { - let challenge = auth - .challenges - .iter() - .find(|c| c.r#type == ChallengeType::Http01) - .ok_or_else(|| anyhow::anyhow!("no http01 challenge found"))?; - let token = &challenge.token; - let key_auth = order.key_authorization(challenge); - let challenge_dir = format!("{}{}", web_root, WELL_KNOWN_PATH); - let challenge_dir_path = Path::new(&challenge_dir); - if !challenge_dir_path.try_exists()? 
{ - fs::create_dir_all(challenge_dir_path)?; - } - let challenge_path = format!("{}/{}", challenge_dir, token); - fs::write(&challenge_path, key_auth.as_str())?; - order.set_challenge_ready(&challenge.url).await?; - loop { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - let auth_status = order.state().status; - if auth_status == OrderStatus::Ready { - break; - } else if auth_status == OrderStatus::Invalid { - return Err(anyhow::anyhow!("Authorization failed")); - } - } - } - - let (csr, private_key_pem) = generate_csr( - &cert_options.domains, - &cert_options.company_name, - &cert_options.eku, - server_key_pem, - )?; - order.finalize(csr.as_bytes()).await?; - - let cert_chain_pem = loop { - match order.certificate().await? { - Some(cert_chain_pem) => break cert_chain_pem, - None => tokio::time::sleep(Duration::from_secs(1)).await, - } - }; - - Ok((cert_chain_pem, private_key_pem)) -} diff --git a/src/utils/cert_manager.rs b/src/utils/cert_manager.rs new file mode 100644 index 00000000..59f5bd6f --- /dev/null +++ b/src/utils/cert_manager.rs @@ -0,0 +1,599 @@ +mod errors; +#[cfg(test)] +mod tests; + +pub mod challenge; +pub mod http_client; +pub mod storage; + +use challenge::CleanupFuture; +pub use errors::CertError; + +use chrono::{TimeZone, Utc}; +use color_eyre::eyre::eyre; +use instant_acme::{ + Account, AccountCredentials, AuthorizationStatus, HttpClient, Identifier, NewAccount, NewOrder, + Order, OrderStatus, +}; +use rcgen::{ + CertificateParams, DistinguishedName, DnType, ExtendedKeyUsagePurpose, KeyUsagePurpose, +}; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; +use tokio::{sync::Mutex, time::sleep}; +use tokio_cron_scheduler::{Job, JobScheduler}; +use tracing::{error, info, instrument, warn}; +use x509_parser::pem::Pem; + +use crate::{ + cert_manager::{challenge::ChallengeHandler, http_client::DefaultHttpClient, storage::Storage}, + utils::keygen::Keypair, +}; + +// Struct that hold the certificate and its metadata +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CertificateData { + pub certificate: String, + pub valid_from: i64, + pub expires_at: i64, + pub updated_at: i64, +} + +/// Type representing the certificate renewal strategy +#[derive(Debug, Clone)] +pub enum RenewalStrategy { + /// Renew the certificate at a fixed interval. + /// If not specified, it defaults to 60 days starting from the issue date + FixedInterval(Option), + /// Renew the certificate a certain number of days before it expires. + /// Defaults to 30 days before expiry if not specified + DaysBeforeExpiry(Option), + /// Renew the certificate a certain percentage of its lifetime. 
+ /// Defaults to 2/3 of the certificate lifetime if not specified + PercentageOfLifetime(Option), +} + +type ACMEHttpClientFactory = Box Box + Send + Sync>; + +/// Struct representing the certificate manager +pub struct CertManager { + // Certificate storage backend + cert_storage: Option>, + // Secrets storage backend + secrets_storage: Option>, + // ACME challenge handler + challenge_handler: Option>, + // ACME client + acme_client: Arc>>, + // ACME HTTP client factory + acme_http_client_factory: ACMEHttpClientFactory, + // Certificate renewal strategy + renewal_strategy: RenewalStrategy, + // The subject alternative names + domains: Vec, + // The company email + email: String, + // The company name + organization: Option, + // The key usage extensions code + eku: Option>, + // The ACME directory URL + acme_directory_url: String, +} + +impl CertManager { + /// Create a new instance of [CertManager] with required parameters + pub fn new( + domains: impl IntoIterator>, + email: impl Into, + organization: Option>, + acme_directory_url: impl Into, + ) -> Result { + let acme_client = Arc::new(Mutex::new(None)); + let http_client = DefaultHttpClient::new(None)?; + let acme_http_client_factory = + Box::new(move || Box::new(http_client.clone()) as Box); + let renewal_strategy = RenewalStrategy::PercentageOfLifetime(None); + + Ok(Self { + cert_storage: None, + secrets_storage: None, + challenge_handler: None, + acme_client, + acme_http_client_factory, + renewal_strategy, + domains: domains.into_iter().map(|d| d.into()).collect(), + email: email.into(), + organization: organization.map(|o| o.into()), + eku: None, + acme_directory_url: acme_directory_url.into(), + }) + } + + /// Set the storage backend for the certificate + ///

+ /// Note: This method is required. + pub fn with_cert_storage(mut self, storage: impl Storage + 'static) -> Self { + self.cert_storage = Some(Box::new(storage)); + self + } + + /// Set the storage backend for the sensitive data + ///

+ /// Note: This method is required. + pub fn with_secrets_storage(mut self, storage: impl Storage + 'static) -> Self { + self.secrets_storage = Some(Box::new(storage)); + self + } + + /// Set the handler for the ACME challenge + ///
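+ /// A builder-style sketch of wiring the manager together; the storage values
+ /// (`cert_store`, `secrets_store`, `challenge_store`) and the HTTP-01 bind address
+ /// are illustrative placeholders:
+ ///
+ /// ```ignore
+ /// let manager = CertManager::new(
+ ///     ["status-list.example.com"],
+ ///     "admin@example.com",
+ ///     None::<String>,
+ ///     "https://acme-v02.api.letsencrypt.org/directory",
+ /// )?
+ /// .with_cert_storage(cert_store)
+ /// .with_secrets_storage(secrets_store)
+ /// .with_challenge_handler(Http01Handler::new(challenge_store, "0.0.0.0", 80));
+ /// ```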

Note: This method is required. + pub fn with_challenge_handler(mut self, handler: impl ChallengeHandler + 'static) -> Self { + self.challenge_handler = Some(Box::new(handler)); + self + } + + /// Override the default http client used by the ACME client + /// + /// Default: [`DefaultHttpClient`](http_client::DefaultHttpClient) + pub fn with_acme_http_client(mut self, client: impl HttpClient + Clone + 'static) -> Self { + self.acme_http_client_factory = Box::new(move || Box::new(client.clone())); + self + } + + /// Override the default certificate renewal strategy + /// + /// Default: `PercentageOfLifetime` + pub fn with_renewal_strategy(mut self, strategy: RenewalStrategy) -> Self { + self.renewal_strategy = strategy; + self + } + + /// Set the key usage extensions code + pub fn with_eku(mut self, eku: &[u64]) -> Self { + self.eku = Some(eku.to_vec()); + self + } + + /// Request a certificate from the certificate authority + #[instrument( + name = "Running the ACME state machine", + skip(self), + fields( + domains = ?self.domains, + acme_directory_url = %self.acme_directory_url + ) + )] + pub async fn request_certificate(&self) -> Result { + let cert_storage = self + .cert_storage + .as_ref() + .ok_or_else(|| CertError::Other(eyre!("Certificate storage not set")))?; + + let challenge_handler = self + .challenge_handler + .as_ref() + .ok_or_else(|| CertError::Other(eyre!("Challenge handler not set")))?; + + if self.domains.is_empty() { + return Err(CertError::Other(eyre!( + "No domain(s) provided to request a certificate for" + ))); + } + + let account = self.acme_account().await?; + let identifiers: Vec<_> = self + .domains + .iter() + .map(|ident| Identifier::Dns(ident.into())) + .collect(); + + // Create the ACME order based on the given domain name(s). + let mut order = account + .new_order(&NewOrder { + identifiers: &identifiers, + }) + .await?; + + let mut cleanup_futures = Vec::new(); + // Process the authorizations + for authz in order.authorizations().await? { + // Skip already valid authorizations + if authz.status == AuthorizationStatus::Valid { + info!( + "Authorization for {:?} is already valid. Skipping...", + authz.identifier + ); + continue; + } + + // Handle the ACME challenge + let (challenge_url, cleanup_future) = challenge_handler + .handle_authorization(&authz, &mut order) + .await?; + // Signal the server we are ready to respond to the challenge + order.set_challenge_ready(&challenge_url).await?; + cleanup_futures.push(cleanup_future); + } + + // poll order until it is ready or an error occurs + self.poll_order(&mut order, cleanup_futures).await?; + + // Generate the certificate signing request + let server_key_pem = self.signing_key_pem().await?; + let csr_der_bytes = self.generate_csr(&server_key_pem)?; + + // Finalize the order and try to get the certificate + order.finalize(&csr_der_bytes).await?; + let cert_chain_pem = loop { + match order.certificate().await? 
{ + Some(cert_chain_pem) => break cert_chain_pem, + None => sleep(Duration::from_secs(1)).await, + } + }; + + let parsed_cert_pem = self.parse_cert_pem(&cert_chain_pem)?; + let x509 = parsed_cert_pem.parse_x509().map_err(|e| { + error!("Got certificate but appears to be invalid: {e}"); + CertError::Parsing(e.to_string()) + })?; + let not_after = x509.validity().not_after.timestamp(); + let not_before = x509.validity().not_before.timestamp(); + + let cert_data = CertificateData { + certificate: cert_chain_pem, + valid_from: not_before, + expires_at: not_after, + updated_at: Utc::now().timestamp(), + }; + + // Store the certificate + let cert_key = self.cert_key(); + let serialized_cert_data = serde_json::to_string(&cert_data)?; + cert_storage.store(&cert_key, &serialized_cert_data).await?; + + info!( + "Certificate obtained successfully. Valid from {} to {}", + ts_to_local(not_before), + ts_to_local(not_after) + ); + Ok(cert_data) + } + + async fn poll_order( + &self, + order: &mut Order, + cleanup_futures: Vec, + ) -> Result<(), CertError> { + use tokio::time::{sleep, timeout}; + + const RETRY_DELAY: Duration = Duration::from_secs(2); + const TIMEOUT: Duration = Duration::from_secs(300); + + let poll_future = async { + loop { + order.refresh().await?; + match order.state().status { + OrderStatus::Ready => return Ok(()), + OrderStatus::Invalid => { + return Err(CertError::Other(eyre!( + "Order with url {} for domains {:?} has been invalidated", + order.url(), + self.domains + ))); + } + _ => sleep(RETRY_DELAY).await, + } + } + }; + + let result = match timeout(TIMEOUT, poll_future).await { + Ok(result) => match result { + Ok(()) => Ok(()), + Err(e) => Err(e), + }, + Err(_) => Err(CertError::Other(eyre!( + "Order validation timed out after {}s", + TIMEOUT.as_secs() + ))), + }; + + // perform clean up, regardless of success or failure + for cleanup_future in cleanup_futures { + if let Err(e) = cleanup_future.run().await { + warn!("Failed to clean up challenge: {e}"); + } + } + result + } + + /// Attempt to get the signing key + #[instrument( + name = "Getting Server Signing Secret", + skip(self), + fields(domains = ?self.domains) + )] + pub async fn signing_key_pem(&self) -> Result { + const MAX_RETRIES: u32 = 3; + const RETRY_DELAY: Duration = Duration::from_millis(500); + + let secrets_storage = self + .secrets_storage + .as_ref() + .ok_or_else(|| CertError::Other(eyre!("Secrets storage not set")))?; + + // Try to load the existing signing key + let secret_id = self.signing_secret_id(); + if let Some(secret) = secrets_storage.load(&secret_id).await? { + info!("Found existing server secret. Skipping..."); + return Ok(secret); + } + + // If the secret does not exist, try to generate and store a new one + warn!("No existing server secret found. Generating a new one..."); + let keypair = Keypair::generate()?; + let key_pem = keypair.to_pkcs8_pem()?; + let mut retries = 0; + loop { + info!("Trying to store the newly generated server secret..."); + match secrets_storage.store(&secret_id, &key_pem).await { + Ok(_) => { + info!("Successfully stored the secret"); + return Ok(key_pem); + } + Err(e) => { + retries += 1; + if retries >= MAX_RETRIES { + return Err(e.into()); + } + warn!("Retrying secret storage after failure: {e:#}"); + sleep(RETRY_DELAY).await; + } + } + } + } + + /// Attempt to get the certificate data + /// + /// # Errors + /// Returns an error if the certificate data cannot be parsed or if there was an issue when trying to retrieve the certificate data. 
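+ ///
+ /// # Example
+ ///
+ /// A usage sketch; `manager` stands in for an already configured [CertManager]:
+ ///
+ /// ```ignore
+ /// if let Some(cert_data) = manager.certificate().await? {
+ ///     // `valid_from` and `expires_at` are Unix timestamps in seconds
+ ///     println!("certificate expires at {}", cert_data.expires_at);
+ /// }
+ /// ```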
+ pub async fn certificate(&self) -> Result, CertError> { + if let Some(cert_storage) = &self.cert_storage { + let cert_key = self.cert_key(); + if let Some(cert_data) = cert_storage.load(&cert_key).await? { + return Ok(Some(serde_json::from_str(&cert_data)?)); + } + } else { + return Err(CertError::Other(eyre!("Certificate storage not set"))); + } + Ok(None) + } + + /// Extract individual certificates from the certificate chain and return them as a vector of base64-encoded strings + /// + /// This fuction will return `None` if the server certificate was not found. + /// + /// # Errors + /// Returns an error if the certificate chain cannot be parsed or if there was an issue when trying to retrieve the server certificate + pub async fn cert_chain_parts(&self) -> Result>, CertError> { + use base64::prelude::{Engine as _, BASE64_STANDARD}; + + if let Some(cert_data) = self.certificate().await? { + let certs = Pem::iter_from_buffer(cert_data.certificate.as_bytes()) + .map(|cert| { + cert.map(|pem| BASE64_STANDARD.encode(&pem.contents)) + .map_err(|e| CertError::Parsing(e.to_string())) + }) + .collect::>()?; + + return Ok(Some(certs)); + } + Ok(None) + } + + /// Renew the certificate if needed + #[instrument( + name = "Checking Certificate Renewal", + skip(self), + fields(domains = ?self.domains) + )] + pub async fn renew_cert_if_needed(&self) -> Result<(), CertError> { + if let Some(cert_data) = self.certificate().await? { + if self.should_renew_cert(&cert_data) { + self.request_certificate().await?; + info!("Certificate renewed successfully"); + return Ok(()); + } + } else { + info!("No certificate found for this domain, requesting new one..."); + self.request_certificate().await?; + info!("New certificate issued successfully"); + return Ok(()); + } + info!("Certificate is still valid. No need to renew"); + Ok(()) + } + + // Attempt to retrieve existing or create an ACME account + #[instrument( + name = "Getting or Creating ACME Account", + skip(self), + fields(domains = ?self.domains, email = %self.email) + )] + async fn acme_account(&self) -> Result { + let secrets_storage = self + .secrets_storage + .as_ref() + .ok_or_else(|| CertError::Other(eyre!("Secrets storage not set")))?; + + let mut client_guard = self.acme_client.lock().await; + if let Some(account) = client_guard.as_ref() { + info!("Found existing ACME account. Skipping..."); + return Ok(account.clone()); + } + + let account_id = self.acme_account_id(); + if let Some(credentials) = secrets_storage.load(&account_id).await? { + info!("Found existing credentials. Trying to load account..."); + let credentials: AccountCredentials = serde_json::from_str(&credentials)?; + let http_client = self.create_http_client(); + match Account::from_credentials_and_http(credentials, http_client).await { + Ok(account) => { + info!("Account successfully loaded"); + *client_guard = Some(account.clone()); + return Ok(account); + } + Err(e) => { + warn!("Invalid credentials: {e}\nrecreating new account..."); + secrets_storage.delete(&account_id).await?; + } + } + } + // Create a new ACME account + let (account, credentials) = Account::create_with_http( + &NewAccount { + contact: &[&format!("mailto:{}", self.email)], + terms_of_service_agreed: true, + only_return_existing: false, + }, + &self.acme_directory_url, + None, + self.create_http_client(), + ) + .await?; + // Store new credentials + secrets_storage + .store(&account_id, &serde_json::to_string(&credentials)?) 
+ .await?; + *client_guard = Some(account.clone()); + info!("Account successfully created"); + Ok(account) + } + + // Generate a certificate signing request with optional EKU + fn generate_csr(&self, signing_key: &str) -> Result, CertError> { + // Build certificate request parameters + let mut params = CertificateParams::new(self.domains.clone()) + .map_err(|e| CertError::Parsing(e.to_string()))?; + let mut dn = DistinguishedName::new(); + dn.push(DnType::CommonName, &self.domains[0]); + if let Some(organization) = &self.organization { + dn.push(DnType::OrganizationName, organization); + } + params.distinguished_name = dn; + // Add Extended Key Usage for Status List Token signing + // https://www.ietf.org/archive/id/draft-ietf-oauth-status-list-10.html#section-10.1 + if let Some(eku) = &self.eku { + params.extended_key_usages = vec![ExtendedKeyUsagePurpose::Other(eku.to_vec())]; + } + params.key_usages = vec![KeyUsagePurpose::DigitalSignature]; + + // Generate certificate signing request + let keypair = + rcgen::KeyPair::from_pem(signing_key).map_err(|e| CertError::Parsing(e.to_string()))?; + let csr = params + .serialize_request(&keypair) + .map_err(|e| CertError::Parsing(e.to_string()))?; + Ok(csr.der().to_vec()) + } + + fn should_renew_cert(&self, cert_data: &CertificateData) -> bool { + let days_to_secs = |days: u32| (days as i64) * 24 * 60 * 60; + + match self.renewal_strategy { + RenewalStrategy::DaysBeforeExpiry(value) => { + // Default to 30 days if not specified + let days_before = value.unwrap_or(30); + let renewal_time = cert_data.expires_at - days_to_secs(days_before); + Utc::now().timestamp() >= renewal_time + } + RenewalStrategy::PercentageOfLifetime(value) => { + // Default to 2/3 of the lifetime if not specified + let percentage = value.unwrap_or(2.0 / 3.0); + let lifetime = cert_data.expires_at - cert_data.valid_from; + let elapsed = Utc::now().timestamp() - cert_data.valid_from; + (elapsed as f32 / lifetime as f32) >= percentage + } + RenewalStrategy::FixedInterval(value) => { + // Default to 60 days if not specified + let interval = value.unwrap_or(60); + let renewal_time = cert_data.valid_from + days_to_secs(interval); + Utc::now().timestamp() >= renewal_time + } + } + } + + fn parse_cert_pem(&self, cert_pem: &str) -> Result { + use x509_parser::pem::parse_x509_pem; + + let pem = parse_x509_pem(cert_pem.as_bytes()) + .map_err(|e| CertError::Parsing(e.to_string()))? 
+ .1; + if pem.label != "CERTIFICATE" || pem.contents.is_empty() { + return Err(CertError::Parsing("Invalid X509 certificate".into())); + } + Ok(pem) + } + + #[inline] + fn create_http_client(&self) -> Box { + (self.acme_http_client_factory)() + } + + #[inline] + fn cert_key(&self) -> String { + format!("certs/{}/cert_data.json", &tld_plus_one(&self.domains)) + } + + #[inline] + fn acme_account_id(&self) -> String { + format!("acme_accounts/{}", tld_plus_one(&self.domains)) + } + + #[inline] + fn signing_secret_id(&self) -> String { + format!("keys/{}", tld_plus_one(&self.domains)) + } +} + +/// Setup the certificate renewal scheduler +pub async fn setup_cert_renewal_scheduler(cert_manager: Arc) -> Result<(), CertError> { + let scheduler = JobScheduler::new().await?; + + // Schedule certificate renewal check every day at midnight + scheduler + .add(Job::new_async("0 0 0 * * *", move |_, _| { + let cert_manager = cert_manager.clone(); + Box::pin(async move { + info!("Running scheduled certificate renewal check"); + if let Err(e) = cert_manager.renew_cert_if_needed().await { + error!("Failed to renew certificate: {e}"); + } + }) + })?) + .await?; + + scheduler.start().await?; + Ok(()) +} + +// Helper function to format timestamp as local time +fn ts_to_local(timestamp: i64) -> String { + use chrono::Local; + Local + .timestamp_opt(timestamp, 0) + .single() + .map(|dt| dt.format("%Y-%m-%d %H:%M").to_string()) + .unwrap_or_else(|| format!("Invalid timestamp: {timestamp}")) +} + +// Helper function to get the TLD+1 +fn tld_plus_one(domains: &[String]) -> String { + use public_suffix::{EffectiveTLDProvider, DEFAULT_PROVIDER}; + + let first = domains[0].clone(); + + // Get the effective TLD+1 for the first domain + DEFAULT_PROVIDER + .effective_tld_plus_one(&first) + .unwrap_or(&first) + .to_string() +} diff --git a/src/utils/cert_manager/challenge.rs b/src/utils/cert_manager/challenge.rs new file mode 100644 index 00000000..d389ee32 --- /dev/null +++ b/src/utils/cert_manager/challenge.rs @@ -0,0 +1,71 @@ +mod dns01; +mod http01; + +pub use dns01::{AwsRoute53DnsUpdater, Dns01Handler, PebbleDnsUpdater}; +pub use http01::Http01Handler; + +use std::{future::Future, pin::Pin}; + +use async_trait::async_trait; +use color_eyre::eyre::Error as Report; +use instant_acme::{Authorization, Order}; + +use crate::cert_manager::storage::StorageError; + +use std::io; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ChallengeError { + #[error("AWS SDK error: {0}")] + AwsSdk(#[source] Report), + + #[error("No hosted zone found for domain {0}")] + ZoneNotFound(String), + + #[error("Storage error: {0}")] + Storage(#[from] StorageError), + + #[error("I/O error: {0}")] + Io(#[from] io::Error), + + #[error("Another error occurred: {0}")] + Other(#[source] Report), +} + +/// Abstract interface for handling ACME challenges +#[async_trait] +pub trait ChallengeHandler: Send + Sync { + /// Handle the ACME challenge for the given authorization and order + /// + /// Returns a tuple containing the challenge url and a cleanup future + async fn handle_authorization( + &self, + authz: &Authorization, + order: &mut Order, + ) -> Result<(String, CleanupFuture), ChallengeError>; +} + +type BoxFuture<'a, T> = Pin + Send + 'a>>; + +/// A future that performs cleanup of resources allocated during ACME challenge validation +pub struct CleanupFuture { + inner: BoxFuture<'static, Result<(), ChallengeError>>, +} + +impl CleanupFuture { + /// Create a cleanup future with the given future + pub fn new(fut: F) -> Self + where + F: 
Future> + Send + 'static, + { + Self { + inner: Box::pin(fut) as BoxFuture<'static, Result<(), ChallengeError>>, + } + } + + /// Run the cleanup process + pub async fn run(self) -> Result<(), ChallengeError> { + self.inner.await + } +} diff --git a/src/utils/cert_manager/challenge/dns01.rs b/src/utils/cert_manager/challenge/dns01.rs new file mode 100644 index 00000000..e9a1b4ba --- /dev/null +++ b/src/utils/cert_manager/challenge/dns01.rs @@ -0,0 +1,353 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use aws_config::SdkConfig; +use aws_sdk_route53::{ + types::{ + Change, ChangeAction, ChangeBatch, HostedZone, ResourceRecord, ResourceRecordSet, RrType, + }, + Client as Route53Client, +}; +use color_eyre::eyre::eyre; +use instant_acme::{Authorization, ChallengeType, Identifier, Order}; +use reqwest::Client; +use serde_json::json; +use tokio::sync::RwLock; +use tracing::info; + +use crate::cert_manager::challenge::{ChallengeError, ChallengeHandler, CleanupFuture}; + +/// Interface for updating DNS records +#[async_trait] +pub trait DnsUpdater: Send + Sync { + /// Upsert a DNS record for the given domain + async fn upsert_record(&self, domain: &str, value: &str) -> Result<(), ChallengeError>; + /// Remove a DNS record for the given domain + async fn remove_record(&self, domain: &str, value: &str) -> Result<(), ChallengeError>; +} + +/// Handler for DNS-01 challenges +pub struct Dns01Handler { + dns_updater: Arc, +} + +impl Dns01Handler { + pub fn new(dns_updater: impl DnsUpdater + 'static) -> Self { + Self { + dns_updater: Arc::new(dns_updater), + } + } +} + +#[async_trait] +impl ChallengeHandler for Dns01Handler { + async fn handle_authorization( + &self, + authz: &Authorization, + order: &mut Order, + ) -> Result<(String, CleanupFuture), ChallengeError> { + let challenge = authz + .challenges + .iter() + .find(|c| c.r#type == ChallengeType::Dns01) + .ok_or_else(|| ChallengeError::Other(eyre!("No DNS-01 challenge found")))?; + + let digest = order.key_authorization(challenge).dns_value(); + let domain = match &authz.identifier { + Identifier::Dns(domain) => domain.clone(), + }; + // Upsert the DNS record + self.dns_updater.upsert_record(&domain, &digest).await?; + + let cleanup = { + let dns_updater = self.dns_updater.clone(); + let domain = domain.clone(); + async move { dns_updater.remove_record(&domain, &digest).await } + }; + let cleanup_fut = CleanupFuture::new(cleanup); + + Ok((challenge.url.clone(), cleanup_fut)) + } +} + +/// A DNS updater for AWS Route 53 +pub struct AwsRoute53DnsUpdater { + client: Route53Client, + zones: Arc>>>, +} + +impl AwsRoute53DnsUpdater { + pub fn new(config: &SdkConfig) -> Self { + Self { + client: Route53Client::new(config), + zones: Arc::new(RwLock::new(None)), + } + } + + // Find the hosted zone for the given domain and return its ID + async fn find_hosted_zone(&self, domain: &str) -> Result { + self.try_cache_zones().await?; + + let read_guard = self.zones.read().await; + let zones = read_guard.as_ref().unwrap(); + // remove the trailing dot from the domain if any + let domain = domain.trim_end_matches('.'); + + if let Some(zone_id) = Self::find_best_match(domain, zones) { + Ok(zone_id.to_string()) + } else { + Err(ChallengeError::ZoneNotFound(domain.to_string())) + } + } + + // Find the best matching hosted zone for the given domain + fn find_best_match<'a>(lookup: &str, zones: &'a [ZoneInfo]) -> Option<&'a str> { + let mut best_match = None; + + for zone in zones.iter() { + let zone_name = &zone.name; + let is_match = if let Some(stripped) = 
zone_name.strip_prefix("*.") { + // Try to match wilcard domains + if lookup.ends_with(stripped) { + // We ensure there's at least one identifier before the wildcard + let diff = lookup.len() - stripped.len(); + lookup[..diff].contains('.') + } else { + false + } + } else if lookup == zone_name { + true + } else if lookup.len() > zone_name.len() { + // Check if lookup ends with .zone_name + let idx = lookup.len() - zone_name.len() - 1; + lookup.as_bytes().get(idx) == Some(&b'.') + && &lookup[idx + 1..] == zone_name.as_str() + } else { + false + }; + + if is_match { + let len = zone_name.len(); + match best_match { + None => best_match = Some((zone.id.as_str(), len)), + Some((_, curr_len)) if len > curr_len => { + best_match = Some((zone.id.as_str(), len)); + } + _ => {} + } + } + } + best_match.map(|(zone_id, _)| zone_id) + } + + async fn try_cache_zones(&self) -> Result<(), ChallengeError> { + // Check if zones are already cached + let read_guard = self.zones.read().await; + if read_guard.is_some() { + return Ok(()); + } + drop(read_guard); + + let mut all_zones = Vec::new(); + let mut next_marker = None; + + // try to get all hosted zones + loop { + let mut req = self.client.list_hosted_zones(); + if let Some(marker) = &next_marker { + req = req.marker(marker); + } + let resp = req + .send() + .await + .map_err(|e| ChallengeError::AwsSdk(e.into()))?; + let hosted_zones = resp.hosted_zones(); + for zone in hosted_zones { + all_zones.push(ZoneInfo::new(zone)); + } + // Check if there are more hosted zones + if resp.is_truncated() { + next_marker = resp.next_marker().map(|s| s.to_string()); + } else { + break; + } + } + *self.zones.write().await = Some(all_zones); + Ok(()) + } + + async fn change_records( + &self, + domain: &str, + change_action: ChangeAction, + value: &str, + ) -> Result { + let record_name = format!("_acme-challenge.{}", domain); + let hosted_zone_id = self.find_hosted_zone(domain).await?; + + // Prepare the TXT record to change + let change = Change::builder() + .action(change_action) + .resource_record_set( + ResourceRecordSet::builder() + .name(&record_name) + .r#type(RrType::Txt) + .ttl(60) + .resource_records( + ResourceRecord::builder() + .value(format!("\"{}\"", value)) + .build() + .map_err(|e| ChallengeError::AwsSdk(e.into()))?, + ) + .build() + .map_err(|e| ChallengeError::AwsSdk(e.into()))?, + ) + .build() + .map_err(|e| ChallengeError::AwsSdk(e.into()))?; + let change_batch = ChangeBatch::builder() + .changes(change) + .build() + .map_err(|e| ChallengeError::AwsSdk(e.into()))?; + + // Try to change the record in Route53 + self.client + .change_resource_record_sets() + .hosted_zone_id(&hosted_zone_id) + .change_batch(change_batch) + .send() + .await + .map_err(|e| ChallengeError::AwsSdk(e.into()))?; + + Ok(record_name) + } +} + +#[derive(Debug, Clone)] +struct ZoneInfo { + name: String, + id: String, +} + +impl ZoneInfo { + fn new(z: &HostedZone) -> Self { + let trimmed = z.name().trim_end_matches('.').to_string(); + ZoneInfo { + name: trimmed, + id: z.id().to_string(), + } + } +} + +#[async_trait] +impl DnsUpdater for AwsRoute53DnsUpdater { + async fn upsert_record(&self, domain: &str, value: &str) -> Result<(), ChallengeError> { + // Try to upsert the record in Route53 + let record_name = self + .change_records(domain, ChangeAction::Upsert, value) + .await?; + + info!("DNS record {record_name} created for {domain}"); + Ok(()) + } + + async fn remove_record(&self, domain: &str, value: &str) -> Result<(), ChallengeError> { + // Try to delete the record in 
Route53 + let record_name = self + .change_records(domain, ChangeAction::Delete, value) + .await?; + + info!("DNS record {record_name} deleted for {domain}"); + Ok(()) + } +} + +// Handler for Pebble DNS (mainly for generating test certificates) +pub struct PebbleDnsUpdater { + client: Client, + addr: String, +} + +impl PebbleDnsUpdater { + pub fn new(addr: impl Into) -> Self { + Self { + client: Client::new(), + addr: addr.into(), + } + } +} + +#[async_trait] +impl DnsUpdater for PebbleDnsUpdater { + async fn upsert_record(&self, domain: &str, value: &str) -> Result<(), ChallengeError> { + let record_name = format!("_acme-challenge.{}.", domain); + let url = format!("{}/set-txt", self.addr); + let body = json!({"host": record_name, "value": value}); + + self.client + .post(&url) + .body(body.to_string()) + .send() + .await + .map_err(|e| ChallengeError::Other(eyre!("Failed to send request: {e}")))? + .error_for_status() + .map_err(|e| ChallengeError::Other(eyre!("Failed to set TXT record: {e}")))?; + + Ok(()) + } + + async fn remove_record(&self, domain: &str, _value: &str) -> Result<(), ChallengeError> { + let record_name = format!("_acme-challenge.{}.", domain); + let url = format!("{}/clear-txt", self.addr); + let body = json!({"host": record_name}); + + self.client + .post(&url) + .body(body.to_string()) + .send() + .await + .map_err(|e| ChallengeError::Other(eyre!("Failed to send request: {e}")))? + .error_for_status() + .map_err(|e| ChallengeError::Other(eyre!("Failed to clear TXT record: {e}")))?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_find_best_match_exact_and_suffix() { + let zones = vec![ + ZoneInfo { + name: "example.com".into(), + id: "Z1".into(), + }, + ZoneInfo { + name: "sub.example.com".into(), + id: "Z2".into(), + }, + ZoneInfo { + name: "test.acme.com".into(), + id: "Z3".into(), + }, + ZoneInfo { + name: "*.test.example.com".into(), + id: "Z4".into(), + }, + ]; + + let id = AwsRoute53DnsUpdater::find_best_match("sub.example.com", &zones); + assert_eq!(id, Some("Z2")); + + let id = AwsRoute53DnsUpdater::find_best_match("www.example.com", &zones); + assert_eq!(id, Some("Z1")); + + let id = AwsRoute53DnsUpdater::find_best_match("acme.com", &zones); + assert_eq!(id, None); + + let id = AwsRoute53DnsUpdater::find_best_match("wildcard.test.example.com", &zones); + assert_eq!(id, Some("Z4")); + } +} diff --git a/src/utils/cert_manager/challenge/http01.rs b/src/utils/cert_manager/challenge/http01.rs new file mode 100644 index 00000000..c1faa8c3 --- /dev/null +++ b/src/utils/cert_manager/challenge/http01.rs @@ -0,0 +1,162 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use axum::{ + extract::{Path, State}, + http::HeaderValue, + response::IntoResponse, + routing::get, + Router, +}; +use color_eyre::eyre::eyre; +use hyper::{header, StatusCode}; +use instant_acme::{Authorization, ChallengeType, Order}; +use tokio::{ + net::TcpListener, + sync::{ + oneshot::{self, Receiver, Sender}, + RwLock, + }, + task::JoinHandle, +}; +use tracing::{error, info, warn}; + +use crate::cert_manager::{ + challenge::{ChallengeError, ChallengeHandler, CleanupFuture}, + storage::Storage, +}; + +type ServerHandler = Arc, Sender<()>)>>>; + +/// Struct representing the HTTP01 challenge handler +pub struct Http01Handler { + challenge_storage: Arc, + host: String, + port: u16, + server_handle: ServerHandler, +} + +impl Http01Handler { + /// Create a new instance of the HTTP-01 challenge handler + pub fn new(challenge_storage: impl Storage + 'static, 
host: &str, port: u16) -> Self { + Self { + challenge_storage: Arc::new(challenge_storage), + host: host.into(), + port, + server_handle: Arc::new(RwLock::new(None)), + } + } + + /// Start the server and return a receiver to confirm readiness + async fn start_server(&self) -> Result, ChallengeError> { + let (ready_tx, ready_rx) = oneshot::channel(); + + // Check if server is already running + if self.server_handle.read().await.is_some() { + info!("HTTP-01 challenge server already running"); + ready_tx.send(()).unwrap(); + return Ok(ready_rx); + } + + let app = Router::new() + .route("/.well-known/acme-challenge/{token}", get(serve_challenge)) + .with_state(self.challenge_storage.clone()); + + // ACME uses port 80 for HTTP-01 challenges + let listener = TcpListener::bind(format!("{}:{}", self.host, self.port)).await?; + info!("Starting HTTP-01 challenge server on port {}...", self.port); + + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let handle = tokio::spawn(async move { + let server = axum::serve(listener, app).with_graceful_shutdown(async { + shutdown_rx.await.ok(); + }); + ready_tx.send(()).unwrap(); + if let Err(e) = server.await { + warn!("HTTP-01 server error: {e}"); + } + }); + + self.server_handle + .write() + .await + .replace((handle, shutdown_tx)); + Ok(ready_rx) + } +} + +async fn serve_challenge( + Path(token): Path, + State(challenge_store): State>, +) -> Result { + match challenge_store.load(&token).await { + Ok(Some(challenge)) => { + info!("Serving ACME challenge for token: {token}"); + Ok(( + [( + header::CONTENT_TYPE, + HeaderValue::from_static("application/octet-stream"), + )], + challenge, + )) + } + Ok(None) => { + warn!("Challenge not found for token: {token}"); + Err(StatusCode::NOT_FOUND) + } + Err(e) => { + error!("Error while retrieving challenge for token {token}: {e:?}"); + Err(StatusCode::INTERNAL_SERVER_ERROR) + } + } +} + +#[async_trait] +impl ChallengeHandler for Http01Handler { + async fn handle_authorization( + &self, + authz: &Authorization, + order: &mut Order, + ) -> Result<(String, CleanupFuture), ChallengeError> { + let challenge = authz + .challenges + .iter() + .find(|c| c.r#type == ChallengeType::Http01) + .ok_or_else(|| ChallengeError::Other(eyre!("No HTTP-01 challenge found")))?; + + // Start the server and wait for it to be ready + self.start_server() + .await? 
+ .await + .map_err(|_| ChallengeError::Other(eyre!("HTTP-01 server failed to start")))?; + + let token = &challenge.token; + let key_auth = order.key_authorization(challenge); + + // Store the key authorization + self.challenge_storage + .store(token, key_auth.as_str()) + .await?; + + let cleanup = { + let storage = self.challenge_storage.clone(); + let token = token.to_string(); + let handle = self.server_handle.clone(); + async move { + let (result, _) = tokio::join!( + async { storage.delete(&token).await.map_err(Into::into) }, + async { + if let Some((_, shutdown_tx)) = handle.write().await.take() { + // Signal the server to shutdown + let _ = shutdown_tx.send(()); + } + } + ); + result + } + }; + let cleanup_fut = CleanupFuture::new(cleanup); + + Ok((challenge.url.clone(), cleanup_fut)) + } +} diff --git a/src/utils/cert_manager/errors.rs b/src/utils/cert_manager/errors.rs new file mode 100644 index 00000000..74d7dcbd --- /dev/null +++ b/src/utils/cert_manager/errors.rs @@ -0,0 +1,37 @@ +use crate::{ + cert_manager::{challenge::ChallengeError, storage::StorageError}, + utils::keygen::Error as KeyOpError, +}; +use color_eyre::eyre::Error as EyreError; +use instant_acme::Error as AcmeError; +use serde_json::Error as SerdeError; +use thiserror::Error; +use tokio_cron_scheduler::JobSchedulerError; + +/// List of errors that can occur during certificate management +#[derive(Error, Debug)] +pub enum CertError { + #[error("ACME error: {0}")] + Acme(#[from] AcmeError), + + #[error("Storage error: {0}")] + Storage(#[from] StorageError), + + #[error("Challenge error: {0}")] + Challenge(#[from] ChallengeError), + + #[error("Certificate parsing error: {0}")] + Parsing(String), + + #[error("Cron error: {0}")] + Cron(#[from] JobSchedulerError), + + #[error("Serialization error: {0}")] + Serde(#[from] SerdeError), + + #[error("Key operation error: {0}")] + KeyOp(#[from] KeyOpError), + + #[error("Uncategorized error: {0}")] + Other(#[source] EyreError), +} diff --git a/src/utils/cert_manager/http_client.rs b/src/utils/cert_manager/http_client.rs new file mode 100644 index 00000000..13e9880c --- /dev/null +++ b/src/utils/cert_manager/http_client.rs @@ -0,0 +1,71 @@ +use std::{future::Future, pin::Pin}; + +use axum::body::Bytes; +use http_body_util::Full; +use hyper::Request; +use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder}; +use hyper_util::{ + client::legacy::{connect::HttpConnector, Client}, + rt::TokioExecutor, +}; +use instant_acme::{BytesResponse, Error, HttpClient}; +use rustls::{ClientConfig, RootCertStore}; +use rustls_pki_types::{pem::PemObject, CertificateDer}; + +use crate::cert_manager::CertError; + +/// Default HTTP client used for ACME flow +#[derive(Clone)] +pub struct DefaultHttpClient(ClientInner); + +impl DefaultHttpClient { + /// Create a new instance of [DefaultHttpClient] with optional root certificate chain pem + pub fn new(root_cert_pem: Option<&[u8]>) -> Result { + Ok(Self(ClientInner::try_new(root_cert_pem)?)) + } +} + +impl HttpClient for DefaultHttpClient { + fn request( + &self, + req: Request>, + ) -> Pin> + Send>> { + let future = self.0.client.request(req); + Box::pin(async move { + match future.await { + Ok(resp) => Ok(BytesResponse::from(resp)), + Err(err) => Err(err.into()), + } + }) + } +} + +#[derive(Clone)] +struct ClientInner { + client: Client, Full>, +} + +impl ClientInner { + pub fn try_new(root_cert_pem: Option<&[u8]>) -> Result { + let mut root_store = RootCertStore::empty(); + if let Some(root_pem) = root_cert_pem { + let certs_der: Vec<_> = 
CertificateDer::pem_slice_iter(root_pem) + .collect::, _>>() + .map_err(|e| CertError::Parsing(e.to_string()))?; + root_store.add_parsable_certificates(certs_der); + } + root_store.extend(webpki_roots::TLS_SERVER_ROOTS.to_vec()); + let tls_config = ClientConfig::builder() + .with_root_certificates(root_store) + .with_no_client_auth(); + let http_builder = HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_all_versions() + .build(); + + Ok(Self { + client: Client::builder(TokioExecutor::new()).build(http_builder), + }) + } +} diff --git a/src/utils/cert_manager/storage.rs b/src/utils/cert_manager/storage.rs new file mode 100644 index 00000000..64a269bd --- /dev/null +++ b/src/utils/cert_manager/storage.rs @@ -0,0 +1,39 @@ +mod aws; +mod redis; + +pub use crate::utils::cert_manager::storage::redis::Redis; +use ::redis::RedisError; +use async_trait::async_trait; +pub use aws::{AwsS3, AwsSecretsManager}; +use color_eyre::eyre::Error as Report; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum StorageError { + #[error("Redis error: {0}")] + Redis(#[from] RedisError), + + #[error("AWS SDK error: {0}")] + AwsSdk(#[source] Report), + + #[error("The data is invalid: {0}")] + InvalidData(String), + + #[error("Bucket {0} is unavailable")] + BucketUnavailable(String), +} + +/// Abstract interface for storage backends used by the certificate manager. +#[async_trait] +pub trait Storage: Send + Sync { + /// Store the value identified by the given key + async fn store(&self, key: &str, value: &str) -> Result<(), StorageError>; + /// Get the value specified by the given key + async fn load(&self, key: &str) -> Result, StorageError>; + /// Update the value associated with the given key + async fn update(&self, key: &str, value: &str) -> Result<(), StorageError> { + self.store(key, value).await + } + /// Delete the value associated with the given key + async fn delete(&self, key: &str) -> Result<(), StorageError>; +} diff --git a/src/utils/cert_manager/storage/aws.rs b/src/utils/cert_manager/storage/aws.rs new file mode 100644 index 00000000..655bdd02 --- /dev/null +++ b/src/utils/cert_manager/storage/aws.rs @@ -0,0 +1,320 @@ +use std::{ + num::NonZeroUsize, + sync::atomic::{AtomicBool, Ordering}, + time::Duration, +}; + +use async_trait::async_trait; +use aws_config::SdkConfig; +use aws_sdk_s3::Client as S3Client; +use aws_sdk_secretsmanager::{ + operation::get_secret_value::GetSecretValueError, Client as SecretsClient, + Config as SecretsConfig, +}; +use aws_secretsmanager_caching::SecretsManagerCachingClient as SecretsCacheClient; +use color_eyre::eyre::eyre; +use tokio::time::sleep; +use tracing::{info, warn}; + +use crate::{cert_manager::storage::StorageError, utils::cert_manager::Storage}; + +/// Type used for AWS Secrets Manager operations +pub struct AwsSecretsManager { + client: SecretsClient, + cache: SecretsCacheClient, +} + +impl AwsSecretsManager { + /// Create a new instance of [AwsSecretsManager] with the given AWS SDK config + pub async fn new(config: &SdkConfig) -> Result { + let client = SecretsClient::new(config); + let asm_builder = SecretsConfig::from(config).to_builder(); + // Cache size: 100 and a TTL of 5 minutes + let cache = SecretsCacheClient::from_builder( + asm_builder, + NonZeroUsize::new(100).unwrap(), + Duration::from_secs(300), + true, + ) + .await + .map_err(|e| StorageError::AwsSdk(e.into()))?; + + Ok(Self { client, cache }) + } +} + +#[async_trait] +impl Storage for AwsSecretsManager { + async fn store(&self, name: &str, data: 
&str) -> Result<(), StorageError> { + use aws_sdk_secretsmanager::error::SdkError; + + // Store a secret only if it does not already exist + match self.client.describe_secret().secret_id(name).send().await { + Ok(_) => { + warn!("Secret {name} already exists. Skipping..."); + Ok(()) + } + Err(SdkError::ServiceError(err)) if err.err().is_resource_not_found_exception() => { + // Secret does not exist, try to create it + self.client + .create_secret() + .name(name) + .secret_string(data) + .send() + .await + .map_err(|e| StorageError::AwsSdk(e.into()))?; + Ok(()) + } + Err(sdk_err) => Err(StorageError::AwsSdk(sdk_err.into())), + } + } + + async fn load(&self, name: &str) -> Result, StorageError> { + use aws_sdk_secretsmanager::error::SdkError; + + match self.cache.get_secret_value(name, None, None, false).await { + Ok(value) => Ok(value.secret_string), + Err(err) => { + // Check for ResourceNotFoundException + if let Some(SdkError::ServiceError(service_err)) = + err.downcast_ref::>() + { + if service_err.err().is_resource_not_found_exception() { + return Ok(None); + } + } + Err(StorageError::AwsSdk(eyre!("{err}"))) + } + } + } + + async fn update(&self, name: &str, data: &str) -> Result<(), StorageError> { + self.client + .put_secret_value() + .secret_id(name) + .secret_string(data) + .send() + .await + .map_err(|e| StorageError::AwsSdk(e.into()))?; + + // Force the secret refresh in the cache + let _ = self.cache.get_secret_value(name, None, None, true).await; + Ok(()) + } + + async fn delete(&self, name: &str) -> Result<(), StorageError> { + self.client + .delete_secret() + .secret_id(name) + .send() + .await + .map_err(|e| StorageError::AwsSdk(e.into()))?; + + // Invalidate cache by refreshing the secret + let _ = self.cache.get_secret_value(name, None, None, true).await; + Ok(()) + } +} + +/// Struct representing AWS S3 storage with optional caching mechanism +pub struct AwsS3 { + client: S3Client, + bucket: String, + cache: Option>, + bucket_exists: AtomicBool, +} + +impl AwsS3 { + /// Create a new instance of [AwsS3Storage] with the given AWS SDK config and bucket name + pub fn new(config: &SdkConfig, bucket_name: impl Into) -> Self { + let client = if std::env::var("APP_ENV").as_deref() == Ok("production") { + S3Client::new(config) + } else { + let dev_config = S3Client::new(config) + .config() + .to_builder() + .force_path_style(true) + .build(); + S3Client::from_conf(dev_config) + }; + Self { + client, + bucket: bucket_name.into(), + cache: None, + bucket_exists: AtomicBool::new(false), + } + } + + /// Set the cache layer if needed + pub fn with_cache(mut self, cache: impl Storage + 'static) -> Self { + self.cache = Some(Box::new(cache)); + self + } + + // Helper function to ensure the S3 bucket exists before any operation + async fn ensure_bucket_exists(&self) -> Result<(), StorageError> { + use aws_sdk_s3::error::SdkError; + + // return if bucket is already verified + if self.bucket_exists.load(Ordering::Relaxed) { + return Ok(()); + } + + const MAX_RETRIES: u32 = 3; + const RETRY_DELAY: Duration = Duration::from_millis(500); + + for attempt in 0..MAX_RETRIES { + // Check if the bucket exists + match self.client.head_bucket().bucket(&self.bucket).send().await { + Ok(_) => { + info!("Bucket {} already exists. 
Skipping...", self.bucket); + self.bucket_exists.store(true, Ordering::Relaxed); + return Ok(()); + } + Err(SdkError::ServiceError(err)) if err.err().is_not_found() => { + // Bucket not found, attempt to create it + match self + .client + .create_bucket() + .bucket(&self.bucket) + .send() + .await + { + Ok(_) => { + info!("Bucket {} created successfully", self.bucket); + self.bucket_exists.store(true, Ordering::Relaxed); + return Ok(()); + } + Err(create_err) => { + if attempt == MAX_RETRIES - 1 { + return Err(StorageError::AwsSdk(create_err.into())); + } + warn!( + "Failed to create bucket {}: {create_err}. Retrying...", + self.bucket + ); + } + } + } + Err(err) => { + if attempt == MAX_RETRIES - 1 { + return Err(StorageError::AwsSdk(err.into())); + } + warn!("Error checking bucket {}: {err}. Retrying...", self.bucket); + } + } + + // Wait a bit before retrying + if attempt < MAX_RETRIES - 1 { + sleep(RETRY_DELAY).await; + } + } + Err(StorageError::BucketUnavailable(self.bucket.clone())) + } +} + +#[async_trait] +impl Storage for AwsS3 { + async fn store(&self, key: &str, data: &str) -> Result<(), StorageError> { + // Ensure the bucket exists + self.ensure_bucket_exists().await?; + + // Invalidate cache + if let Some(cache) = &self.cache { + if let Err(e) = cache.delete(key).await { + warn!("Failed to invalidate cache for {key}: {e}"); + } + } + + // Store the object in the bucket + let body = data.as_bytes().to_vec(); + match self + .client + .put_object() + .bucket(&self.bucket) + .key(key) + .body(body.into()) + .send() + .await + { + Ok(_) => { + info!("Stored object {key} in bucket {}", self.bucket); + Ok(()) + } + Err(e) => { + // We make sure cache stays invalid + if let Some(cache) = &self.cache { + let _ = cache.delete(key).await; + } + Err(StorageError::AwsSdk(e.into())) + } + } + } + + async fn load(&self, key: &str) -> Result, StorageError> { + use aws_sdk_s3::error::SdkError; + + // Check the cache first if it exists + if let Some(cache) = &self.cache { + match cache.load(key).await { + Ok(Some(data)) => { + return Ok(Some(data)); + } + Ok(None) => (), + Err(e) => warn!("Cache error for {key}: {e}"), + } + } + + // If not found in cache, try to get directly from S3 + self.ensure_bucket_exists().await?; + match self + .client + .get_object() + .bucket(&self.bucket) + .key(key) + .send() + .await + { + Ok(output) => { + let bytes = output + .body + .collect() + .await + .map_err(|e| StorageError::AwsSdk(e.into()))?; + let data = String::from_utf8(bytes.into_bytes().into()) + .map_err(|e| StorageError::InvalidData(e.to_string()))?; + // Update cache if it exists + if let Some(cache) = &self.cache { + if let Err(e) = cache.store(key, &data).await { + warn!("Failed to update cache for {key}: {e}"); + } + } + Ok(Some(data)) + } + Err(SdkError::ServiceError(err)) if err.err().is_no_such_key() => Ok(None), + Err(sdk_err) => Err(StorageError::AwsSdk(sdk_err.into())), + } + } + + async fn delete(&self, key: &str) -> Result<(), StorageError> { + match self + .client + .delete_object() + .bucket(&self.bucket) + .key(key) + .send() + .await + { + Ok(_) => { + // Invalidate cache + if let Some(cache) = &self.cache { + if let Err(e) = cache.delete(key).await { + warn!("Failed to invalidate cache for {key}: {e}"); + } + } + Ok(()) + } + Err(e) => Err(StorageError::AwsSdk(e.into())), + } + } +} diff --git a/src/utils/cert_manager/storage/redis.rs b/src/utils/cert_manager/storage/redis.rs new file mode 100644 index 00000000..baf3a265 --- /dev/null +++ b/src/utils/cert_manager/storage/redis.rs 
@@ -0,0 +1,51 @@ +use async_trait::async_trait; +use redis::{aio::ConnectionManager, AsyncCommands}; + +use crate::cert_manager::storage::{Storage, StorageError}; + +/// Struct representing Redis storage +#[derive(Clone)] +pub struct Redis { + conn: ConnectionManager, + ttl: Option<u64>, +} + +impl Redis { + /// Create a new instance of [Redis] + /// with the given Redis connection manager + pub fn new(conn: ConnectionManager) -> Self { + Self { conn, ttl: None } + } + + /// Set the time-to-live (TTL) for the stored data + pub fn with_ttl(self, ttl: u64) -> Self { + Self { + ttl: Some(ttl), + ..self + } + } +} + +#[async_trait] +impl Storage for Redis { + async fn store(&self, key: &str, value: &str) -> Result<(), StorageError> { + let mut conn = self.conn.clone(); + if let Some(ttl) = self.ttl { + let _: () = conn.set_ex(key, value, ttl).await?; + } else { + let _: () = conn.set(key, value).await?; + } + Ok(()) + } + + async fn load(&self, key: &str) -> Result<Option<String>, StorageError> { + let mut conn = self.conn.clone(); + Ok(conn.get(key).await?) + } + + async fn delete(&self, key: &str) -> Result<(), StorageError> { + let mut conn = self.conn.clone(); + let _: () = conn.del(key).await?; + Ok(()) + } +} diff --git a/src/utils/cert_manager/tests.rs b/src/utils/cert_manager/tests.rs new file mode 100644 index 00000000..9abf0a1a --- /dev/null +++ b/src/utils/cert_manager/tests.rs @@ -0,0 +1,324 @@ +use crate::cert_manager::storage::{Storage, StorageError}; + +use super::*; +use async_trait::async_trait; +use std::collections::HashMap; +use std::sync::{Arc, Once}; +use tokio::sync::Mutex; + +fn days_to_secs(days: u32) -> i64 { + (days as i64) * 24 * 60 * 60 +} + +static INIT_CRYPTO: Once = Once::new(); + +fn init_crypto() { + INIT_CRYPTO.call_once(|| { + rustls::crypto::aws_lc_rs::default_provider() + .install_default() + .expect("Failed to install crypto provider"); + }); +} + +#[derive(Clone)] +struct MockStorage { + data: Arc<Mutex<HashMap<String, String>>>, +} + +impl MockStorage { + fn new() -> Self { + Self { + data: Arc::new(Mutex::new(HashMap::new())), + } + } +} + +#[async_trait] +impl Storage for MockStorage { + async fn store(&self, key: &str, value: &str) -> Result<(), StorageError> { + self.data + .lock() + .await + .insert(key.to_string(), value.to_string()); + Ok(()) + } + + async fn load(&self, key: &str) -> Result<Option<String>, StorageError> { + Ok(self.data.lock().await.get(key).cloned()) + } + + async fn delete(&self, key: &str) -> Result<(), StorageError> { + self.data.lock().await.remove(key); + Ok(()) + } +} + +#[test] +fn test_cert_manager_builder() { + init_crypto(); + + let cert_storage = MockStorage::new(); + let secrets_storage = MockStorage::new(); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + Some("Test Org"), + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_cert_storage(cert_storage) + .with_secrets_storage(secrets_storage) + .with_eku(&[1, 2, 3, 4]); + + assert!(manager.cert_storage.is_some()); + assert!(manager.secrets_storage.is_some()); + assert!(manager.challenge_handler.is_none()); + assert_eq!(manager.eku, Some(vec![1, 2, 3, 4])); +} + +#[test] +fn test_renewal_strategy_days_before_expiry() { + init_crypto(); + + let strategy = RenewalStrategy::DaysBeforeExpiry(Some(30)); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_renewal_strategy(strategy); + + let now = Utc::now().timestamp(); + + // Certificate
expires in 20 days - should renew (threshold is 30 days) + let cert_data_should_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - days_to_secs(60), + expires_at: now + days_to_secs(20), + updated_at: now, + }; + assert!(manager.should_renew_cert(&cert_data_should_renew)); + + // Certificate expires in 40 days - should not renew + let cert_data_should_not_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - days_to_secs(50), + expires_at: now + days_to_secs(40), + updated_at: now, + }; + assert!(!manager.should_renew_cert(&cert_data_should_not_renew)); +} + +#[tokio::test] +async fn test_renewal_strategy_percentage_of_lifetime() { + init_crypto(); + + let strategy = RenewalStrategy::PercentageOfLifetime(Some(0.8)); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_renewal_strategy(strategy); + + let now = Utc::now().timestamp(); + let cert_lifetime = days_to_secs(90); + + // Certificate is at 85% of its lifetime - should renew (threshold is 80%) + let elapsed_time = (cert_lifetime as f32 * 0.85) as i64; + let cert_data_should_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - elapsed_time, + expires_at: now - elapsed_time + cert_lifetime, + updated_at: now, + }; + assert!(manager.should_renew_cert(&cert_data_should_renew)); + + // Certificate is at 70% of its lifetime - should not renew + let elapsed_time = (cert_lifetime as f32 * 0.70) as i64; + let cert_data_should_not_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - elapsed_time, + expires_at: now - elapsed_time + cert_lifetime, + updated_at: now, + }; + assert!(!manager.should_renew_cert(&cert_data_should_not_renew)); +} + +#[tokio::test] +async fn test_renewal_strategy_fixed_interval() { + init_crypto(); + + let strategy = RenewalStrategy::FixedInterval(Some(60)); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_renewal_strategy(strategy); + + let now = Utc::now().timestamp(); + + // Certificate issued 70 days ago - should renew (interval is 60 days) + let cert_data_should_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - days_to_secs(70), + expires_at: now + days_to_secs(40), + updated_at: now, + }; + assert!(manager.should_renew_cert(&cert_data_should_renew)); + + // Certificate issued 50 days ago - should not renew + let cert_data_should_not_renew = CertificateData { + certificate: "mock_cert".to_string(), + valid_from: now - days_to_secs(50), + expires_at: now + days_to_secs(40), + updated_at: now, + }; + assert!(!manager.should_renew_cert(&cert_data_should_not_renew)); +} + +#[tokio::test] +async fn test_certificate_returns_none_if_not_found() { + init_crypto(); + + let cert_storage = MockStorage::new(); + let cert_manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_cert_storage(cert_storage); + + let cert = cert_manager.certificate().await.unwrap(); + assert!(cert.is_none()); +} + +#[tokio::test] +async fn test_certificate_storage_and_retrieval() { + init_crypto(); + + let cert_storage = MockStorage::new(); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, 
+ "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_cert_storage(cert_storage.clone()); + + let cert = manager.certificate().await.unwrap(); + assert!(cert.is_none()); + + // Store the certificate manually + let serialized = include_str!("../../test_resources/cert_data.json"); + let cert_data: CertificateData = serde_json::from_str(serialized).unwrap(); + + let cert_key = manager.cert_key(); + cert_storage.store(&cert_key, serialized).await.unwrap(); + + let retrieved_cert = manager.certificate().await.unwrap(); + assert!(retrieved_cert.is_some()); + let retrieved = retrieved_cert.unwrap(); + assert_eq!(retrieved.certificate, cert_data.certificate); + assert_eq!(retrieved.valid_from, cert_data.valid_from); + assert_eq!(retrieved.expires_at, cert_data.expires_at); +} + +#[tokio::test] +async fn test_signing_key_generation_and_storage() { + init_crypto(); + + let secrets_storage = MockStorage::new(); + + let manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_secrets_storage(secrets_storage.clone()); + + // First call should generate and store a new key + let generated_key = manager.signing_key_pem().await.unwrap(); + assert!(generated_key.starts_with("-----BEGIN PRIVATE KEY-----")); + assert!(generated_key.ends_with("-----END PRIVATE KEY-----\n")); + + // Second call should return the same key + let key = manager.signing_key_pem().await.unwrap(); + assert_eq!(key, generated_key); + + // The key should have been stored + let stored_key = secrets_storage + .load("keys/example.com") + .await + .unwrap() + .unwrap(); + assert_eq!(key, stored_key); +} + +#[test] +fn test_tld_plus_one_function() { + init_crypto(); + + let domains = vec!["www.example.com".to_string()]; + let result = tld_plus_one(&domains); + assert_eq!(result, "example.com"); + + let domains = vec!["sub.domain.example.co.uk".to_string()]; + let result = tld_plus_one(&domains); + assert_eq!(result, "example.co.uk"); + + let domains = ["sub.example.com".to_string(), "acme.test.com".to_string()]; + let result = tld_plus_one(&domains); + assert_eq!(result, "example.com"); +} + +#[test] +fn test_ts_to_local_helper() { + init_crypto(); + + let timestamp = 1749045448; + let result = ts_to_local(timestamp); + assert_eq!("2025-06-04 14:57", result); +} + +#[tokio::test] +async fn test_cert_chain_parts() { + init_crypto(); + + let cert_storage = MockStorage::new(); + let cert_manager = CertManager::new( + vec!["example.com"], + "test@example.com", + None::, + "https://acme-staging-v02.api.letsencrypt.org/directory", + ) + .unwrap() + .with_cert_storage(cert_storage.clone()); + + // there are 2 parts in the certificate chain + let serialized = include_str!("../../test_resources/cert_data.json"); + cert_storage + .store("certs/example.com/cert_data.json", serialized) + .await + .unwrap(); + + let parts = cert_manager.cert_chain_parts().await.unwrap().unwrap(); + assert_eq!(parts.len(), 2); +} diff --git a/src/utils/errors.rs b/src/utils/errors.rs index 104eff3b..6a13ee6d 100644 --- a/src/utils/errors.rs +++ b/src/utils/errors.rs @@ -2,32 +2,10 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum Error { - #[error("Failed to read certificate from {0:?}")] - ReadCertificate(std::path::PathBuf), - #[error("Failed to parse certificate")] - ParseFailed, - #[error("provided pem file is not valid certficate")] - PermFailed, - #[error("Failed to generate a new keypair")] - KeyGenFailed, - 
#[error("Failed to generate pem from keypair")] - PemGenFailed, - #[error("invalid file type")] - InvalidFileType, #[error("error: {0}")] Generic(String), #[error("invalid index")] InvalidIndex, - #[error("Unsupported bits value")] - UnsupportedBits, #[error("Failed to decode lst")] - DecodeError, -} - -#[derive(Error, Debug)] -pub enum SecretCacheError { - #[error("Failed to retrieve secret: {0}")] - RetrieveFailed(String), - #[error("AWS SDK error: {0}")] - AwsSdkError(String), + DecodeFailed, } diff --git a/src/utils/keygen.rs b/src/utils/keygen.rs index e5a3ea23..b92d5cd4 100644 --- a/src/utils/keygen.rs +++ b/src/utils/keygen.rs @@ -1,13 +1,21 @@ +use color_eyre::eyre::Report; use p256::{ ecdsa::{SigningKey, VerifyingKey}, pkcs8::{DecodePrivateKey, EncodePrivateKey, LineEnding}, }; -use rand::{rngs::OsRng, TryRngCore}; - -use super::errors::Error; +use rand::{rand_core::OsError, rngs::OsRng, TryRngCore}; +use thiserror::Error; const SECRET_KEY_LENGTH: usize = 32; +#[derive(Debug, Error)] +pub enum Error { + #[error("Failed to generate key: {0}")] + KeyGen(#[from] OsError), + #[error("Failed to parse key: {0}")] + Parsing(#[source] Report), +} + /// A keypair for signing and verifying JWT #[derive(Debug, Clone)] pub struct Keypair { @@ -31,15 +39,12 @@ impl Keypair { Err(err) => { if attempt == MAX_ATTEMPTS - 1 { tracing::error!("Failed to generate random bytes: {err:?}"); - return Err(Error::KeyGenFailed); + return Err(Error::KeyGen(err)); } } } } - let key = SigningKey::from_slice(&seed).map_err(|err| { - tracing::error!("Failed to create signing key: {err:?}"); - Error::KeyGenFailed - })?; + let key = SigningKey::from_slice(&seed).map_err(|e| Error::Parsing(e.into()))?; let keypair = Keypair { repr: KeyRepr { key }, @@ -53,16 +58,14 @@ impl Keypair { } /// Get the verifying key + #[allow(dead_code)] pub fn verifying_key(&self) -> &VerifyingKey { self.repr.key.verifying_key() } - /// Create a keypair from a pkcs8 PEM file + /// Create a keypair from a pkcs8 PEM string pub fn from_pkcs8_pem(pem: &str) -> Result { - let key = SigningKey::from_pkcs8_pem(pem).map_err(|err| { - tracing::error!("Failed to create signing key from PEM: {err:?}"); - Error::KeyGenFailed - })?; + let key = SigningKey::from_pkcs8_pem(pem).map_err(|e| Error::Parsing(e.into()))?; Ok(Keypair { repr: KeyRepr { key }, }) @@ -73,10 +76,7 @@ impl Keypair { self.repr .key .to_pkcs8_pem(LineEnding::default()) - .map_err(|err| { - tracing::error!("Failed to convert signing key to PEM: {err:?}"); - Error::PemGenFailed - }) + .map_err(|e| Error::Parsing(e.into())) .map(|pem| pem.to_string()) } diff --git a/src/utils/lst_gen.rs b/src/utils/lst_gen.rs index 09c68e34..73e80d7a 100644 --- a/src/utils/lst_gen.rs +++ b/src/utils/lst_gen.rs @@ -2,7 +2,7 @@ use base64url::{decode, encode}; use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression}; use std::io::{Read, Write}; -use crate::model::{Status, StatusEntry, StatusList}; +use crate::models::{Status, StatusEntry, StatusList}; use super::errors::Error; @@ -201,7 +201,7 @@ pub fn update_status_list( let original_bits = current_bits as usize; let new_bits = determine_bits(&status_updates, Some(original_bits))?; - let compressed_data = decode(&existing_lst).map_err(|_| Error::DecodeError)?; + let compressed_data = decode(&existing_lst).map_err(|_| Error::DecodeFailed)?; let mut decoder = ZlibDecoder::new(&compressed_data[..]); let mut status_array = Vec::new(); decoder diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 518e7b7f..a33d3325 100644 --- a/src/utils/mod.rs 
+++ b/src/utils/mod.rs @@ -1,8 +1,6 @@ pub mod bits_validation; -pub mod ca_gen; +pub mod cert_manager; pub mod errors; pub mod keygen; pub mod lst_gen; -pub mod secretmanager; pub mod state; -pub mod x509_parser; diff --git a/src/utils/secretmanager.rs b/src/utils/secretmanager.rs deleted file mode 100644 index 727112b2..00000000 --- a/src/utils/secretmanager.rs +++ /dev/null @@ -1,69 +0,0 @@ -use super::errors::Error; -use async_trait::async_trait; -use aws_config::{BehaviorVersion, Region}; -use aws_sdk_secretsmanager::Client as AwsClient; - -pub struct AwsSecret { - secret_name: String, - _region: Region, - aws_client: AwsClient, -} - -pub struct Secret { - secret_name: String, - secret_value: String, -} - -#[async_trait] -pub trait Operations { - async fn get_key(&self) -> Result, Error>; - - async fn store_key(&self, secret: Secret) -> Result<(), Error>; -} - -impl AwsSecret { - pub async fn new(secret_name: String, _region: Region) -> Self { - let config = aws_config::defaults(BehaviorVersion::v2025_01_17()) - .region(_region.clone()) - .load() - .await; - let aws_client = AwsClient::new(&config); - - Self { - secret_name, - _region, - aws_client, - } - } -} - -#[async_trait] -impl Operations for AwsSecret { - async fn get_key(&self) -> Result, Error> { - let asm = self.aws_client.clone(); - let response = asm - .get_secret_value() - .secret_id(self.secret_name.clone()) - .send() - .await - .map_err(|e| { - tracing::error!(" error getting key: {}", e.to_string()); - Error::Generic("Failed to get key".to_string()) - })?; - Ok(response.secret_string().map(|s| s.to_string())) - } - - async fn store_key(&self, secret: Secret) -> Result<(), Error> { - let asm = self.aws_client.clone(); - asm.create_secret() - .name(secret.secret_name) - .secret_string(secret.secret_value) - .send() - .await - .map_err(|e| { - tracing::error!("error storing key: {}", e.to_string()); - Error::Generic("Failed to store secret".to_string()) - })?; - Ok(()) - } -} diff --git a/src/utils/state.rs b/src/utils/state.rs index 09108909..4daf982b 100644 --- a/src/utils/state.rs +++ b/src/utils/state.rs @@ -1,286 +1,97 @@ -use super::keygen::Keypair; use crate::{ - database::queries::SeaOrmStore, - model::{Credentials, StatusListToken}, - utils::errors::{Error, SecretCacheError}, + cert_manager::{ + challenge::{AwsRoute53DnsUpdater, Dns01Handler}, + storage::{AwsS3, AwsSecretsManager, Redis}, + CertManager, + }, + config::Config as AppConfig, + database::{queries::SeaOrmStore, Migrator}, + models::{Credentials, StatusListToken}, }; -use async_trait::async_trait; -use aws_config::BehaviorVersion; -use aws_sdk_secretsmanager::{ - config::Region, error::ProvideErrorMetadata, Client as SecretsManagerClient, -}; -use aws_secretsmanager_caching::SecretsManagerCachingClient; -use sea_orm::{Database, DatabaseConnection}; +use aws_config::{BehaviorVersion, Region}; +use color_eyre::eyre::{Context, Result as EyeResult}; +use sea_orm::Database; use sea_orm_migration::MigratorTrait; -use std::{num::NonZeroUsize, sync::Arc, time::Duration}; -use tracing::info; - -// Define the SecretCache -#[async_trait] -pub trait SecretCache: Send + Sync { - async fn get_secret_string( - &self, - secret_id: String, - ) -> Result, SecretCacheError>; -} - -// Define a wrapper struct for SecretsManagerCachingClient -pub struct AwsSecretCache { - inner: SecretsManagerCachingClient, -} +use secrecy::ExposeSecret; +use std::sync::Arc; -// Implement SecretCache for AwsSecretCache -#[async_trait] -impl SecretCache for AwsSecretCache { - async fn 
get_secret_string( - &self, - secret_id: String, - ) -> Result, SecretCacheError> { - match self - .inner - .get_secret_value(&secret_id, None, None, false) - .await - { - Ok(output) => Ok(output.secret_string), - Err(e) => Err(SecretCacheError::AwsSdkError(e.to_string())), - } - } -} - -/// Configuration for secret caching -#[derive(Clone)] -pub struct CacheConfig { - pub enabled: bool, - pub cache_size: NonZeroUsize, - pub ttl: Duration, -} +use super::cert_manager::{challenge::PebbleDnsUpdater, http_client::DefaultHttpClient}; -impl Default for CacheConfig { - fn default() -> Self { - Self { - enabled: true, - cache_size: NonZeroUsize::new(1024).unwrap(), - ttl: Duration::from_secs(3600), - } - } -} - -/// A type that manages server secrets and their caching -#[derive(Clone)] -pub struct SecretManager { - cache: Arc, - client: Arc, - server_secret_name: String, - cache_config: CacheConfig, -} - -impl SecretManager { - pub fn new( - cache: impl SecretCache + 'static, - client: SecretsManagerClient, - server_secret_name: String, - cache_config: CacheConfig, - ) -> Self { - Self { - cache: Arc::new(cache), - client: Arc::new(client), - server_secret_name, - cache_config, - } - } - - pub async fn get_secret_from_cache_or_aws( - &self, - secret_name: String, - use_cache: bool, - ) -> Result, SecretCacheError> { - if use_cache && self.cache_config.enabled { - self.cache.get_secret_string(secret_name).await - } else { - let result = self - .client - .get_secret_value() - .secret_id(secret_name) - .send() - .await; - match result { - Ok(output) => Ok(output.secret_string().map(String::from)), - Err(e) => Err(SecretCacheError::AwsSdkError(e.to_string())), - } - } - } - - pub async fn get_server_secret(&self) -> Result, SecretCacheError> { - if self.cache_config.enabled { - self.cache - .get_secret_string(self.server_secret_name.clone()) - .await - } else { - let result = self - .client - .get_secret_value() - .secret_id(self.server_secret_name.clone()) - .send() - .await; - match result { - Ok(output) => Ok(output.secret_string().map(String::from)), - Err(e) => Err(SecretCacheError::AwsSdkError(e.to_string())), - } - } - } - - /// Creates a new secret with the given name and value - pub async fn create_secret(&self, name: String, value: String) -> Result<(), Error> { - self.client - .create_secret() - .name(name) - .secret_string(value) - .send() - .await - .map_err(|e| Error::Generic(e.to_string()))?; - Ok(()) - } - - /// Deletes a secret by name - pub async fn delete_secret(&self, name: String) -> Result<(), Error> { - self.client - .delete_secret() - .secret_id(name) - .force_delete_without_recovery(true) - .send() - .await - .map_err(|e| Error::Generic(e.to_string()))?; - Ok(()) - } - - /// Updates the cache configuration - pub fn update_cache_config(&mut self, config: CacheConfig) { - self.cache_config = config; - } - - /// Creates a new SecretManager instance and ensures the server secret exists - pub async fn setup( - secret_name: String, - region: String, - cache_config: Option, - ) -> Result { - let config = aws_config::load_defaults(BehaviorVersion::latest()) - .await - .into_builder() - .region(Region::new(region)) - .build(); - let client = SecretsManagerClient::new(&config); - - // Ensure secret exists - match client - .describe_secret() - .secret_id(secret_name.clone()) - .send() - .await - { - Ok(e) => { - tracing::info!("Server key secret already exists in AWS Secrets Manager"); - info!("{}", e.name().unwrap_or_default()); - } - Err(e) => { - let error_message = e.to_string(); - 
let service_error = e.into_service_error(); - if service_error.code() == Some("ResourceNotFoundException") { - tracing::info!( - "No server key secret found in AWS, generating and storing new keypair" - ); - let keypair = Keypair::generate().map_err(|_| Error::KeyGenFailed)?; - let pem = keypair.to_pkcs8_pem().map_err(|_| Error::PemGenFailed)?; - client - .create_secret() - .name(secret_name.clone()) - .secret_string(pem) - .send() - .await - .map_err(|e| Error::Generic(e.to_string()))?; - } else { - tracing::error!("Error describing secret: {:?}", error_message); - return Err(Error::Generic(error_message)); - } - } - } - - let cache_config = cache_config.unwrap_or_default(); - let asm_builder = aws_sdk_secretsmanager::config::Builder::from(&config); - let caching_client = SecretsManagerCachingClient::from_builder( - asm_builder, - if cache_config.enabled { - cache_config.cache_size - } else { - NonZeroUsize::new(1).unwrap() // Minimal cache size when disabled - }, - if cache_config.enabled { - cache_config.ttl - } else { - Duration::from_secs(1) // Minimal TTL when disabled - }, - false, - ) - .await - .map_err(|e| Error::Generic(e.to_string()))?; - - let cache = AwsSecretCache { - inner: caching_client, - }; - - Ok(Self::new(cache, client, secret_name, cache_config)) - } -} +// Could also be passed at runtime through an environment variable +const BUCKET_NAME: &str = "status-list-adorsys"; +const ENV_PRODUCTION: &str = "production"; +const ENV_DEVELOPMENT: &str = "development"; #[derive(Clone)] pub struct AppState { - pub credential_repository: Arc<SeaOrmStore<Credentials>>, - pub status_list_token_repository: Arc<SeaOrmStore<StatusListToken>>, - pub secret_manager: Arc<SecretManager>, - pub server_public_domain: String, + pub credential_repo: SeaOrmStore<Credentials>, + pub status_list_token_repo: SeaOrmStore<StatusListToken>, + pub server_domain: String, + pub cert_manager: Arc<CertManager>, } -pub async fn setup() -> AppState { - let url = std::env::var("DATABASE_URL").expect("DATABASE_URL env not set"); - let db: DatabaseConnection = Database::connect(&url) +pub async fn build_state(config: &AppConfig) -> EyeResult<AppState> { + let db = Database::connect(config.database.url.expose_secret()) .await - .expect("Failed to connect to database"); + .wrap_err("Failed to connect to database")?; - crate::database::Migrator::up(&db, None) + Migrator::up(&db, None) .await - .expect("Failed to apply migrations"); + .wrap_err("Failed to run database migrations")?; - let secret_name = - std::env::var("SERVER_KEY_SECRET_NAME").expect("SERVER_KEY_SECRET_NAME env not set"); - let region = std::env::var("AWS_REGION").expect("AWS_REGION env not set"); + let aws_config = aws_config::defaults(BehaviorVersion::latest()) + .region(Region::new(config.aws.region.clone())) + .load() + .await; - let secret_manager = SecretManager::setup(secret_name, region, None) + let redis_conn = config + .redis + .start(None, None, None) .await - .expect("Failed to setup secret manager"); - - let server_public_domain = - std::env::var("SERVER_PUBLIC_DOMAIN").expect("SERVER_PUBLIC_DOMAIN env not set"); - - let db = Arc::new(db); - AppState { - credential_repository: Arc::new(SeaOrmStore::new(Arc::clone(&db))), - status_list_token_repository: Arc::new(SeaOrmStore::new(Arc::clone(&db))), - secret_manager: Arc::new(secret_manager), - server_public_domain, + .wrap_err("Failed to connect to Redis")?; + + // Initialize the challenge handler based on the environment. + // Use a fake DNS server to validate the challenge in development.
+ let app_env = std::env::var("APP_ENV").unwrap_or(ENV_DEVELOPMENT.to_string()); + let challenge_handler = if app_env == ENV_PRODUCTION { + let updater = AwsRoute53DnsUpdater::new(&aws_config); + Dns01Handler::new(updater) + } else { + // Use pebble as the DNS server in development + let updater = PebbleDnsUpdater::new("http://challtestsrv:8055"); + Dns01Handler::new(updater) + }; + + // Initialize the storage backends for the certificate manager + let cache = Redis::new(redis_conn.clone()); + let cert_storage = AwsS3::new(&aws_config, BUCKET_NAME).with_cache(cache); + let secrets_storage = AwsSecretsManager::new(&aws_config).await?; + + let mut certificate_manager = CertManager::new( + [&config.server.domain], + &config.server.cert.email, + config.server.cert.organization.as_deref(), + &config.server.cert.acme_directory_url, + )? + .with_cert_storage(cert_storage) + .with_secrets_storage(secrets_storage) + .with_challenge_handler(challenge_handler) + .with_eku(&config.server.cert.eku); + + if app_env == ENV_DEVELOPMENT { + // Override the default HTTP client to use the pebble root certificate + // It is necessary to avoid https errors since pebble uses localhost over https + // with a self-signed root certificate + let root_cert = include_bytes!("../test_resources/pebble.pem"); + let http_client = DefaultHttpClient::new(Some(root_cert))?; + certificate_manager = certificate_manager.with_acme_http_client(http_client); } -} -impl AppState { - /// Helper to set up AppState with a test credential for the given issuer - pub async fn setup_test_with_credential( - issuer: &str, - public_key: &str, - alg: jsonwebtoken::Algorithm, - ) -> Self { - let state = setup().await; - let creds = crate::model::Credentials::new(issuer.to_string(), public_key.to_string(), alg); - // Ignore error if already exists - let _ = state.credential_repository.insert_one(creds).await; - state - } + let db_clone = Arc::new(db); + Ok(AppState { + credential_repo: SeaOrmStore::new(db_clone.clone()), + status_list_token_repo: SeaOrmStore::new(db_clone), + server_domain: config.server.domain.clone(), + cert_manager: Arc::new(certificate_manager), + }) } diff --git a/src/utils/x509_parser.rs b/src/utils/x509_parser.rs deleted file mode 100644 index f6716acd..00000000 --- a/src/utils/x509_parser.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::fs; -use std::path::Path; - -use pem::parse; - -use super::errors::Error; - -/// Loads a certificate from a PEM file and returns the DER bytes -#[inline] -pub fn load_certificate_der>(cert_path: P) -> Result, Error> { - let path = cert_path.as_ref(); - - // Check file extension is .pem - if path - .extension() - .and_then(|s| s.to_str()) - .and_then(|s| check(s, "pem")()) - .is_none() - { - Err(Error::InvalidFileType)?; - } - // Read the PEM-encoded certificate file - let cert_pem = - fs::read_to_string(path).map_err(|_| Error::ReadCertificate(path.to_path_buf()))?; - - // Parse the PEM - let pem = parse(cert_pem).map_err(|_| Error::ParseFailed)?; - - if pem.tag() != "CERTIFICATE" { - return Err(Error::PermFailed); - } - - // Return the DER bytes - Ok(pem.contents().to_vec()) -} - -#[inline] -fn check(s: &str, right: &str) -> impl FnOnce() -> Option { - let s = s.to_string(); - let right = right.to_string(); - move || { - if s == right { - Some(true) - } else { - None - } - } -} - -mod test { - - #[test] - fn test_certificates() { - let test_certs = vec![ - "./src/test_resources/test_cert.pem", - "./src/test_resources/test_cert2.pem", - ]; - - for cert_path in test_certs { - let res = 
crate::utils::x509_parser::load_certificate_der(cert_path).ok(); - assert!(res.is_some(), "Failed to parse {}", cert_path); - } - } - #[test] - fn test_der_encoded_certificate_content() { - // Pretend this is DER content (binary-like junk data in a .pem) - let test_cert = "./src/test_resources/test_cert.der"; - let res = crate::utils::x509_parser::load_certificate_der(test_cert).ok(); - assert!(res.is_none()); - } -} diff --git a/src/web/handlers/credential_issuance.rs b/src/web/handlers/credential_issuance.rs index f0b557ef..2791f19b 100644 --- a/src/web/handlers/credential_issuance.rs +++ b/src/web/handlers/credential_issuance.rs @@ -2,7 +2,7 @@ use axum::{extract::State, http::StatusCode, response::IntoResponse, Json}; use crate::{ auth::authentication::publish_credentials, database::error::RepositoryError, - model::Credentials, utils::state::AppState, + models::Credentials, utils::state::AppState, }; pub async fn credential_handler( diff --git a/src/web/handlers.rs b/src/web/handlers/mod.rs similarity index 59% rename from src/web/handlers.rs rename to src/web/handlers/mod.rs index 21db1f49..d2716264 100644 --- a/src/web/handlers.rs +++ b/src/web/handlers/mod.rs @@ -1,8 +1,10 @@ -mod credential_issuance; +pub mod credential_issuance; pub mod status_list; +pub mod status_list_aggregation; pub use credential_issuance::credential_handler; pub use status_list::{ error::StatusListError, handler::{get_status_list, update_statuslist}, }; +pub use status_list_aggregation::aggregate_status_lists; diff --git a/src/web/handlers/status_list/constants.rs b/src/web/handlers/status_list/constants.rs index f7a0601b..bf7349fb 100644 --- a/src/web/handlers/status_list/constants.rs +++ b/src/web/handlers/status_list/constants.rs @@ -1,7 +1,7 @@ -pub(super) const ACCEPT_STATUS_LISTS_HEADER_JWT: &str = "application/statuslist+jwt"; -pub(super) const ACCEPT_STATUS_LISTS_HEADER_CWT: &str = "application/statuslist+cwt"; -pub(super) const STATUS_LISTS_HEADER_JWT: &str = "statuslist+jwt"; -pub(super) const STATUS_LISTS_HEADER_CWT: &str = "statuslist+cwt"; +pub const ACCEPT_STATUS_LISTS_HEADER_JWT: &str = "application/statuslist+jwt"; +pub const ACCEPT_STATUS_LISTS_HEADER_CWT: &str = "application/statuslist+cwt"; +pub const STATUS_LISTS_HEADER_JWT: &str = "statuslist+jwt"; +pub const STATUS_LISTS_HEADER_CWT: &str = "statuslist+cwt"; // CBOR Web Token (CWT) constants pub(super) const CWT_TYPE: i64 = 16; @@ -10,5 +10,3 @@ pub(super) const ISSUED_AT: i32 = 6; pub(super) const EXP: i32 = 4; pub(super) const TTL: i32 = 65534; pub(super) const STATUS_LIST: i32 = 65533; - -pub(super) const GZIP_HEADER: &str = "gzip"; diff --git a/src/web/handlers/status_list/error.rs b/src/web/handlers/status_list/error.rs index e4d7d4a5..20f95010 100644 --- a/src/web/handlers/status_list/error.rs +++ b/src/web/handlers/status_list/error.rs @@ -33,6 +33,8 @@ pub enum StatusListError { TokenAlreadyExists, #[error("Issuer mismatch")] IssuerMismatch, + #[error("The service is currently unavailable. 
Please try again later")] + ServiceUnavailable, } impl IntoResponse for StatusListError { @@ -54,6 +56,7 @@ impl IntoResponse for StatusListError { Forbidden(_) => StatusCode::FORBIDDEN, TokenAlreadyExists => StatusCode::CONFLICT, IssuerMismatch => StatusCode::FORBIDDEN, + ServiceUnavailable => StatusCode::SERVICE_UNAVAILABLE, }; (status_code, self.to_string()).into_response() diff --git a/src/web/handlers/status_list/handler.rs b/src/web/handlers/status_list/handler.rs index c21ee753..ef533720 100644 --- a/src/web/handlers/status_list/handler.rs +++ b/src/web/handlers/status_list/handler.rs @@ -1,4 +1,4 @@ -use std::{fmt::Debug, io::Write as _, sync::Arc}; +use std::fmt::Debug; use axum::{ extract::{Path, State}, @@ -8,25 +8,26 @@ use axum::{ }; use chrono::Utc; use coset::{ - self, cbor::Value as CborValue, iana::Algorithm, CborSerializable, CoseSign1Builder, - HeaderBuilder, + self, + cbor::Value as CborValue, + iana::{Algorithm, EnumI64, HeaderParameter}, + CborSerializable, CoseSign1Builder, HeaderBuilder, }; -use flate2::{write::GzEncoder, Compression}; use jsonwebtoken::{EncodingKey, Header}; use p256::ecdsa::{signature::Signer, Signature}; use serde::{Deserialize, Serialize}; use serde_json::Value; use crate::{ - model::{Status, StatusEntry, StatusList, StatusListToken}, + models::{Status, StatusEntry, StatusList, StatusListToken}, utils::{keygen::Keypair, state::AppState}, web::midlw::AuthenticatedIssuer, }; use super::{ constants::{ - ACCEPT_STATUS_LISTS_HEADER_CWT, ACCEPT_STATUS_LISTS_HEADER_JWT, CWT_TYPE, EXP, GZIP_HEADER, - ISSUED_AT, STATUS_LIST, STATUS_LISTS_HEADER_CWT, STATUS_LISTS_HEADER_JWT, SUBJECT, TTL, + ACCEPT_STATUS_LISTS_HEADER_CWT, ACCEPT_STATUS_LISTS_HEADER_JWT, CWT_TYPE, EXP, ISSUED_AT, + STATUS_LIST, STATUS_LISTS_HEADER_CWT, STATUS_LISTS_HEADER_JWT, SUBJECT, TTL, }, error::StatusListError, }; @@ -65,6 +66,7 @@ impl StatusListTokenExt for StatusListToken { } } + pub async fn get_status_list( State(state): State, Path(list_id): Path, @@ -72,22 +74,17 @@ pub async fn get_status_list( ) -> Result { let accept = headers.get(header::ACCEPT).and_then(|h| h.to_str().ok()); - // build the token depending on the accept header - match accept { - None => - // assume jwt by default if no accept header is provided - { - build_status_list_token(ACCEPT_STATUS_LISTS_HEADER_JWT, &list_id, &state).await - } + // Validate accept header + let accept = match accept { + None => ACCEPT_STATUS_LISTS_HEADER_JWT, // Default to JWT if no accept header Some(accept) if accept == ACCEPT_STATUS_LISTS_HEADER_JWT || accept == ACCEPT_STATUS_LISTS_HEADER_CWT => { - build_status_list_token(accept, &list_id, &state).await + accept } - Some(_) => Err(StatusListError::InvalidAcceptHeader), - } -} + Some(_) => return Err(StatusListError::InvalidAcceptHeader), + }; async fn build_status_list_token( accept: &str, @@ -100,7 +97,7 @@ async fn build_status_list_token( .find_one_by(list_id.to_string()) .await .map_err(|err| { - tracing::error!("Failed to get status list {list_id} from database: {err:?}"); + tracing::error!("Failed to get status list {list_id_clone} from database: {err:?}"); StatusListError::InternalServerError })?; @@ -139,8 +136,8 @@ async fn build_status_list_token( }; let token_bytes = match accept { - ACCEPT_STATUS_LISTS_HEADER_CWT => issue_cwt(&status_claims, &server_key)?, - _ => issue_jwt(&status_claims, &server_key)?.into_bytes(), + ACCEPT_STATUS_LISTS_HEADER_CWT => issue_cwt(&status_claims, &server_key, vec![])?, + _ => issue_jwt(&status_claims, &server_key, vec![])?.into_bytes(), 
}; Ok(( @@ -155,7 +152,11 @@ async fn build_status_list_token( } // Function to create a CWT per the specification -fn issue_cwt(token: &StatusListToken, server_key: &Keypair) -> Result, StatusListError> { +fn issue_cwt( + token: &StatusListToken, + keypair: &Keypair, + cert_chain: Vec, +) -> Result, StatusListError> { let mut claims = vec![]; // Building the claims @@ -206,13 +207,15 @@ fn issue_cwt(token: &StatusListToken, server_key: &Keypair) -> Result, S StatusListError::InternalServerError })?; + let x5chain_value = build_x5chain(&cert_chain)?; // Building the protected header let protected = HeaderBuilder::new() .algorithm(Algorithm::ES256) + .value(HeaderParameter::X5Chain.to_i64(), x5chain_value) .value(CWT_TYPE, CborValue::Text(STATUS_LISTS_HEADER_CWT.into())) .build(); - let signing_key = server_key.signing_key(); + let signing_key = keypair.signing_key(); // Building the CWT let sign1 = CoseSign1Builder::new() @@ -232,6 +235,28 @@ fn issue_cwt(token: &StatusListToken, server_key: &Keypair) -> Result, S Ok(cwt_bytes) } +fn build_x5chain(cert_chain: &[String]) -> Result { + use base64::prelude::{Engine as _, BASE64_STANDARD}; + + let result: Result>, _> = cert_chain + .iter() + .map(|b64| BASE64_STANDARD.decode(b64)) + .collect(); + let certs_der = result.map_err(|err| { + tracing::error!("Failed to decode certificate chain to DER: {err:?}"); + StatusListError::InternalServerError + })?; + + let x5chain_value = if certs_der.len() == 1 { + CborValue::Bytes(certs_der.into_iter().next().unwrap()) + } else { + let cert_array: Vec = certs_der.into_iter().map(CborValue::Bytes).collect(); + CborValue::Array(cert_array) + }; + + Ok(x5chain_value) +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct StatusListClaims { pub exp: Option, @@ -241,7 +266,11 @@ pub struct StatusListClaims { pub ttl: Option, } -fn issue_jwt(token: &StatusListToken, server_key: &Keypair) -> Result { +fn issue_jwt( + token: &StatusListToken, + keypair: &Keypair, + cert_chain: Vec, +) -> Result { let iat = Utc::now().timestamp(); let ttl = token.ttl.unwrap_or(43200); // Building the claims @@ -255,8 +284,9 @@ fn issue_jwt(token: &StatusListToken, server_key: &Keypair) -> Result Result>, + State(appstate): State, Path(list_id): Path, AuthenticatedIssuer(issuer): AuthenticatedIssuer, Json(body): Json, @@ -324,7 +354,7 @@ pub async fn update_statuslist( } }; - let store = &appstate.status_list_token_repository; + let store = &appstate.status_list_token_repo; let status_list_token = match store.find_one_by(list_id.clone()).await { Ok(token) => token, @@ -340,7 +370,7 @@ pub async fn update_statuslist( if let Some(status_list_token) = status_list_token { // Ownership check: only the owner (token.sub) can update - if status_list_token.sub != issuer { + if status_list_token.issuer != issuer { return ( StatusCode::FORBIDDEN, StatusListError::Forbidden("Issuer does not own this list".to_string()), @@ -429,12 +459,44 @@ fn update_status(lst: &str, updates: Vec) -> Result Result { + let server_key = Keypair::from_pkcs8_pem( + &state + .secret_manager + .get_server_secret() + .await + .map_err(|_| StatusListError::InternalServerError)? + .unwrap(), + ) + .map_err(|_| StatusListError::InternalServerError)?; + + if ACCEPT_STATUS_LISTS_HEADER_JWT == accept { + Ok(( + StatusCode::OK, + [(header::CONTENT_TYPE, accept)], + issue_jwt(token, &server_key, vec![])? + ) + .into_response()) + } else { + Ok(( + StatusCode::OK, + [(header::CONTENT_TYPE, accept)], + issue_cwt(token, &server_key, vec![])? 
+ ) + .into_response()) + } +} + #[cfg(test)] mod tests { use super::*; use crate::{ - model::{status_list_tokens, StatusList, StatusListToken}, - test_utils::test::test_app_state, + models::{status_list_tokens, StatusList, StatusListToken}, + test_utils::test_app_state, }; use axum::{ body::to_bytes, @@ -444,10 +506,8 @@ mod tests { }; use coset::CoseSign1; use jsonwebtoken::{DecodingKey, Validation}; - use p256::{ - ecdsa::{signature::Verifier, VerifyingKey}, - pkcs8::{EncodePublicKey, LineEnding}, - }; + use p256::ecdsa::{signature::Verifier, VerifyingKey}; + use p256::pkcs8::{EncodePublicKey, LineEnding}; use sea_orm::{DatabaseBackend, MockDatabase}; use serde_json::json; use std::{io::Read, sync::Arc}; @@ -476,7 +536,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let mut headers = HeaderMap::new(); headers.insert( @@ -503,15 +563,10 @@ mod tests { decoder.read_to_end(&mut body_bytes).unwrap(); let body_str = std::str::from_utf8(&body_bytes).unwrap(); - // Load the key from the cache - let pem = app_state - .secret_manager - .get_server_secret() - .await - .unwrap() - .unwrap(); - let server_key = Keypair::from_pkcs8_pem(&pem).unwrap(); - let decoding_key_pem = server_key + // Load the decoding key + let signing_key_pem = app_state.cert_manager.signing_key_pem().await.unwrap(); + let keypair = Keypair::from_pkcs8_pem(&signing_key_pem).unwrap(); + let decoding_key_pem = keypair .verifying_key() .to_public_key_pem(LineEnding::default()) .unwrap() @@ -551,12 +606,12 @@ mod tests { let db_conn = Arc::new( mock_db .append_query_results::, _>(vec![vec![ - status_list_token.clone(), + status_list_token, ]]) .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let mut headers = HeaderMap::new(); headers.insert( @@ -585,14 +640,9 @@ mod tests { let cwt = CoseSign1::from_slice(&body_bytes).unwrap(); // Load the key from the cache - let pem = app_state - .secret_manager - .get_server_secret() - .await - .unwrap() - .unwrap(); - let server_key = Keypair::from_pkcs8_pem(&pem).unwrap(); - let signing_key = server_key.signing_key(); + let signing_key_pem = app_state.cert_manager.signing_key_pem().await.unwrap(); + let keypair = Keypair::from_pkcs8_pem(&signing_key_pem).unwrap(); + let signing_key = keypair.signing_key(); let verifying_key = VerifyingKey::from(signing_key); // Verify signature @@ -663,7 +713,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let mut headers = HeaderMap::new(); headers.insert( @@ -682,10 +732,7 @@ mod tests { #[tokio::test] async fn test_get_status_list_unsupported_accept_header() { - let mock_db = MockDatabase::new(DatabaseBackend::Postgres); - let db_conn = Arc::new(mock_db.into_connection()); - - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(None).await; let mut headers = HeaderMap::new(); headers.insert(http::header::ACCEPT, "application/xml".parse().unwrap()); // unsupported @@ -742,7 +789,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let update_body = json!({ "updates": [ @@ -771,7 +818,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = 
test_app_state(Some(db_conn.clone())).await; let update_body = json!({ "updates": [ diff --git a/src/web/handlers/status_list/mod.rs b/src/web/handlers/status_list/mod.rs index 9b7b706a..59defd1c 100644 --- a/src/web/handlers/status_list/mod.rs +++ b/src/web/handlers/status_list/mod.rs @@ -1,5 +1,7 @@ -pub(super) mod constants; -pub(super) mod error; -pub(crate) mod handler; +pub mod constants; +pub mod error; +pub mod handler; pub mod publish_token_status; pub mod update_token_status; + +pub use publish_token_status::publish_token_status; diff --git a/src/web/handlers/status_list/publish_token_status.rs b/src/web/handlers/status_list/publish_token_status.rs index 794edb18..ca0c62c0 100644 --- a/src/web/handlers/status_list/publish_token_status.rs +++ b/src/web/handlers/status_list/publish_token_status.rs @@ -1,5 +1,5 @@ use crate::{ - model::{StatusList, StatusListToken, StatusRequest}, + models::{StatusList, StatusListToken, StatusRequest}, utils::{errors::Error, lst_gen::create_status_list, state::AppState}, web::handlers::status_list::error::StatusListError, web::midlw::AuthenticatedIssuer, @@ -18,14 +18,13 @@ pub async fn publish_token_status( AuthenticatedIssuer(issuer): AuthenticatedIssuer, Json(payload): Json, ) -> Result { - let store = &appstate.status_list_token_repository; + let store = &appstate.status_list_token_repo; let stl = create_status_list(payload.status).map_err(|e| { tracing::error!("lst_from failed: {:?}", e); match e { Error::Generic(msg) => StatusListError::Generic(msg), Error::InvalidIndex => StatusListError::InvalidIndex, - Error::UnsupportedBits => StatusListError::UnsupportedBits, _ => StatusListError::Generic(e.to_string()), } })?; @@ -49,15 +48,10 @@ pub async fn publish_token_status( lst: stl.lst, }; - // TODO: This field is used elsewhere to link status list tokens - // to issuers, hence the existence of a Foreign Key. We'll maybe - // have to switch to another field for this purpose. 
- // - // let sub = format!( - // "https://{}/statuslist/{}", - // appstate.server_public_domain, payload.list_id - // ); - let sub = issuer.clone(); + let sub = format!( + "https://{}/statuslist/{}", + appstate.server_domain, payload.list_id + ); // Build the new status list token let new_status_list_token = StatusListToken { @@ -88,9 +82,9 @@ pub async fn publish_token_status( mod tests { use super::*; use crate::{ - model::{status_list_tokens, Status, StatusEntry, StatusListToken}, + models::{status_list_tokens, Status, StatusEntry, StatusListToken}, test_resources::helper::publish_test_token, - test_utils::test::test_app_state, + test_utils::test_app_state, }; use axum::{extract::State, Json}; use sea_orm::{DatabaseBackend, MockDatabase}; @@ -145,7 +139,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = publish_token_status( State(app_state), @@ -208,7 +202,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; // Perform the insertion let _ = publish_token_status( @@ -221,7 +215,7 @@ mod tests { // Verify the token is stored let result = app_state - .status_list_token_repository + .status_list_token_repo .find_one_by(token_id.to_string()) .await .unwrap(); @@ -265,7 +259,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = match publish_token_status( State(app_state), @@ -317,7 +311,7 @@ mod tests { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = publish_token_status( State(app_state.clone()), @@ -330,7 +324,7 @@ mod tests { assert_eq!(response.status(), StatusCode::CREATED); let result = app_state - .status_list_token_repository + .status_list_token_repo .find_one_by(token_id.to_string()) .await .unwrap(); @@ -383,7 +377,7 @@ mod tests { ]) .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = publish_token_status( State(app_state), @@ -408,7 +402,7 @@ mod tests { }], ); let db_conn = Arc::new(mock_db.into_connection()); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = match publish_token_status( State(app_state), diff --git a/src/web/handlers/status_list/update_token_status.rs b/src/web/handlers/status_list/update_token_status.rs index 01e06281..0b6a1d42 100644 --- a/src/web/handlers/status_list/update_token_status.rs +++ b/src/web/handlers/status_list/update_token_status.rs @@ -2,7 +2,7 @@ use axum::{extract::State, response::IntoResponse, Json}; use hyper::StatusCode; use crate::{ - model::StatusRequest, + models::StatusRequest, utils::{ bits_validation::BitFlag, errors::Error, lst_gen::update_status_list, state::AppState, }, @@ -17,7 +17,7 @@ pub async fn update_token_status( AuthenticatedIssuer(issuer): AuthenticatedIssuer, Json(payload): Json, ) -> Result { - let store = &appstate.status_list_token_repository; + let store = &appstate.status_list_token_repo; // Fetch the existing token let token = match store.find_one_by(payload.list_id.clone()).await { @@ -62,7 +62,6 @@ pub async fn update_token_status( match e { Error::Generic(msg) => StatusListError::Generic(msg), 
Error::InvalidIndex => StatusListError::InvalidIndex, - Error::UnsupportedBits => StatusListError::UnsupportedBits, _ => StatusListError::Generic(e.to_string()), } })?; @@ -95,10 +94,10 @@ mod test { use sea_orm::{DatabaseBackend, MockDatabase}; use crate::{ - model::{ + models::{ status_list_tokens, Status, StatusEntry, StatusList, StatusListToken, StatusRequest, }, - test_utils::test::test_app_state, + test_utils::test_app_state, utils::lst_gen::create_status_list, web::{ handlers::status_list::update_token_status::update_token_status, @@ -166,7 +165,7 @@ mod test { .into_connection(), ); - let app_state = test_app_state(db_conn.clone()); + let app_state = test_app_state(Some(db_conn.clone())).await; let response = update_token_status( State(app_state), AuthenticatedIssuer("issuer".to_string()), diff --git a/src/web/handlers/status_list_aggregation.rs b/src/web/handlers/status_list_aggregation.rs new file mode 100644 index 00000000..7ee8a8ee --- /dev/null +++ b/src/web/handlers/status_list_aggregation.rs @@ -0,0 +1,315 @@ +use std::fmt::Debug; +use std::sync::Arc; + +use axum::{ + extract::{Json, State}, + http::{header, HeaderMap}, + response::IntoResponse, +}; +use serde::{Deserialize, Serialize}; +use tracing; + +use crate::{ + model::{StatusList, StatusListToken}, + utils::state::AppState, + web::handlers::status_list::{ + constants::{ACCEPT_STATUS_LISTS_HEADER_CWT, ACCEPT_STATUS_LISTS_HEADER_JWT}, + error::StatusListError, + handler::build_status_list_token, + }, +}; + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + model::{status_list_tokens, StatusList}, + test_utils::test::test_app_state, + }; + use axum::http::StatusCode; + use sea_orm::{DatabaseBackend, MockDatabase}; + use std::sync::Arc; + + // Helper to create a test request payload + fn create_test_request(list_ids: Vec) -> StatusListAggregationRequest { + StatusListAggregationRequest { list_ids } + } + + #[tokio::test] + async fn test_aggregate_status_lists_success() { + let mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let list_id1 = "list1"; + let list_id2 = "list2"; + let request = create_test_request(vec![list_id1.to_string(), list_id2.to_string()]); + + let status_list1 = StatusListToken::new( + list_id1.to_string(), + None, + chrono::Utc::now().timestamp(), + StatusList { + bits: 2, + lst: "abc".to_string(), + }, + "issuer1".to_string(), + None, + ); + + let status_list2 = StatusListToken::new( + list_id2.to_string(), + None, + chrono::Utc::now().timestamp(), + StatusList { + bits: 2, + lst: "def".to_string(), + }, + "issuer2".to_string(), + None, + ); + + let db_conn = Arc::new( + mock_db + .append_query_results::, _>(vec![ + vec![status_list1.clone()], // find_one_by for list1 + vec![status_list2.clone()], // find_one_by for list2 + ]) + .into_connection(), + ); + + let app_state = Arc::new(test_app_state(db_conn)); + + let headers = HeaderMap::new(); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .unwrap() + .into_response(); + assert_eq!(response.status(), StatusCode::OK); + } + + #[tokio::test] + async fn test_aggregate_status_lists_not_found() { + let mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let list_id1 = "list1"; + let list_id2 = "list2"; + let request = create_test_request(vec![list_id1.to_string(), list_id2.to_string()]); + + let db_conn = Arc::new( + mock_db + .append_query_results::, _>(vec![ + vec![], // find_one_by for list1 returns None + ]) + .into_connection(), + ); + + let app_state = 
Arc::new(test_app_state(db_conn)); + + let headers = HeaderMap::new(); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .unwrap_err() + .into_response(); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + } + + #[tokio::test] + async fn test_aggregate_status_lists_invalid_accept_header() { + let mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let list_id1 = "list1"; + let list_id2 = "list2"; + let request = create_test_request(vec![list_id1.to_string(), list_id2.to_string()]); + + let db_conn = Arc::new( + mock_db + .append_query_results::<Vec<status_list_tokens::Model>, _>(vec![ + vec![], // find_one_by for list1 returns None + ]) + .into_connection(), + ); + + let app_state = Arc::new(test_app_state(db_conn)); + + let mut headers = HeaderMap::new(); + headers.insert(header::ACCEPT, "invalid/format".parse().unwrap()); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .unwrap_err() + .into_response(); + assert_eq!(response.status(), StatusCode::NOT_ACCEPTABLE); + } + + #[tokio::test] + async fn test_aggregate_status_lists_empty_list() { + let mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let request = create_test_request(vec![]); + + let db_conn = Arc::new( + mock_db + .append_query_results::<Vec<status_list_tokens::Model>, _>(vec![ + vec![], // find_one_by returns None + ]) + .into_connection(), + ); + + let app_state = Arc::new(test_app_state(db_conn)); + + let headers = HeaderMap::new(); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .unwrap_err() + .into_response(); + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + } + + #[tokio::test] + async fn test_update_statuslist_success() { + let _mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let initial_status_list = StatusList { + bits: 8, + lst: encode_lst(vec![0, 0, 0]), + }; + let _existing_token = StatusListToken::new( + "test_list".to_string(), + None, + 1234567890, + initial_status_list.clone(), + "test_subject".to_string(), + None, + ); + let updated_status_list = StatusList { + bits: 8, + lst: encode_lst(vec![0, 1, 0]), // After update: index 1 set to INVALID + }; + let _updated_token = StatusListToken::new( + "test_list".to_string(), + None, + 1234567890, + updated_status_list, + "test_subject".to_string(), + None, + ); + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct StatusListAggregationRequest { + pub list_ids: Vec<String>, +} + +#[allow(dead_code)] +fn encode_lst(bits: Vec<u8>) -> String { + base64url::encode( + bits.iter() + .flat_map(|&n| n.to_be_bytes()) + .collect::<Vec<u8>>(), + ) +} + +pub async fn aggregate_status_lists( + State(state): State<Arc<AppState>>, + headers: HeaderMap, + Json(request): Json<StatusListAggregationRequest>, +) -> Result { + let accept = headers.get(header::ACCEPT).and_then(|h| h.to_str().ok()); + + // Validate accept header + let accept = match accept { + None => ACCEPT_STATUS_LISTS_HEADER_JWT, // Default to JWT if no accept header + Some(accept) + if accept == ACCEPT_STATUS_LISTS_HEADER_JWT + || accept == ACCEPT_STATUS_LISTS_HEADER_CWT => + { + accept + } + Some(_) => return Err(StatusListError::InvalidAcceptHeader), + }; + + // Get all status lists from the database + let mut status_lists = Vec::new(); + for list_id in request.list_ids { + let status_list = state + .status_list_token_repository + .find_one_by(list_id.clone()) + .await + .map_err(|err| { + tracing::error!("Failed to get status list {list_id} from database: {err:?}"); + StatusListError::InternalServerError + })?
+            .ok_or(StatusListError::StatusListNotFound)?;
+        status_lists.push(status_list);
+    }
+
+    // Aggregate the status lists
+    let aggregated_list = aggregate_status_lists_impl(status_lists)?;
+
+    // Create a new status list token for the aggregated list
+    let _aggregated_token = StatusListToken::new(
+        "aggregated".to_string(),
+        None,
+        chrono::Utc::now().timestamp(),
+        aggregated_list,
+        "aggregated".to_string(),
+        None,
+    );
+
+    // Return the aggregated list in the requested format
+    build_status_list_token(accept, &_aggregated_token, &state).await
+}
+
+fn aggregate_status_lists_impl(
+    status_lists: Vec<StatusListToken>,
+) -> Result<StatusList, StatusListError> {
+    if status_lists.is_empty() {
+        return Err(StatusListError::Generic(
+            "No status lists provided".to_string(),
+        ));
+    }
+
+    // Get the maximum bits value from all lists
+    let max_bits = status_lists
+        .iter()
+        .map(|list| list.status_list.bits)
+        .max()
+        .unwrap_or(0);
+
+    // Combine all status lists
+    let mut combined_lst = String::new();
+    for list in status_lists {
+        combined_lst.push_str(&list.status_list.lst);
+    }
+
+    Ok(StatusList {
+        bits: max_bits,
+        lst: combined_lst,
+    })
+}
+
+pub trait StatusListTokenExt {
+    fn new(
+        list_id: String,
+        exp: Option<i64>,
+        iat: i64,
+        status_list: StatusList,
+        sub: String,
+        ttl: Option<i64>,
+    ) -> Self;
+}
+
+impl StatusListTokenExt for StatusListToken {
+    fn new(
+        list_id: String,
+        exp: Option<i64>,
+        iat: i64,
+        status_list: StatusList,
+        sub: String,
+        ttl: Option<i64>,
+    ) -> Self {
+        Self {
+            list_id,
+            issuer: sub.clone(),
+            exp,
+            iat,
+            status_list,
+            sub,
+            ttl,
+        }
+    }
+}
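The new handler is an ordinary `axum` handler (`State` + `HeaderMap` + `Json` extractors), so it can be mounted like any other route. A minimal sketch, assuming the module is declared under `web::handlers` and using a hypothetical `/status-lists/aggregate` path — the patch itself does not add a route registration:

```rust
// Sketch only: the route path and state wiring are assumptions, not part of this patch.
use std::sync::Arc;

use axum::{routing::post, Router};

use crate::{
    utils::state::AppState,
    web::handlers::status_list_aggregation::aggregate_status_lists,
};

// Builds a router exposing the aggregation handler under an assumed path.
pub fn aggregation_router(app_state: Arc<AppState>) -> Router {
    Router::new()
        .route("/status-lists/aggregate", post(aggregate_status_lists))
        .with_state(app_state)
}
```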
diff --git a/src/web/midlw.rs b/src/web/midlw.rs
index 22c7554a..8e6f7ee1 100644
--- a/src/web/midlw.rs
+++ b/src/web/midlw.rs
@@ -150,7 +150,11 @@ pub async fn auth(
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::utils::keygen::Keypair;
+    use crate::{
+        models::{credentials, Alg},
+        test_utils::test_app_state,
+        utils::keygen::Keypair,
+    };
     use axum::{
         body::to_bytes,
         http::{header, HeaderMap, Method, Request},
@@ -159,15 +163,14 @@
     use once_cell::sync::Lazy;
     use p256::pkcs8::EncodePublicKey;
     use p256::pkcs8::LineEnding;
+    use sea_orm::{DatabaseBackend, MockDatabase};
     use std::time::{SystemTime, UNIX_EPOCH};
-    use uuid::Uuid;
 
     static INIT: Lazy<()> = Lazy::new(|| {
         dotenvy::dotenv().ok();
     });
 
     #[tokio::test]
-    async fn test_authenticated_issuer_from_request_parts_success() {
         *INIT;
 
         // Generate keypair and JWT
@@ -183,9 +186,7 @@ mod tests {
             .unwrap()
             .as_secs() as usize;
 
-        // TODO: This test is manipulating the real database, which is not wanted.
-        // In the meantime, we add a generated suffix to improve testing reliability.
-        let issuer_id = format!("test-issuer-demo-{}", Uuid::new_v4());
+        let issuer_id = "test-issuer";
 
         #[derive(serde::Serialize)]
         struct Claims {
@@ -206,12 +207,16 @@
             .unwrap();
 
         // Setup AppState with the issuer registered
-        let app_state = AppState::setup_test_with_credential(
-            &issuer_id,
-            &public_key_pem,
-            jsonwebtoken::Algorithm::ES256,
-        )
-        .await;
+        let creds = credentials::Model {
+            issuer: issuer_id.to_string(),
+            public_key: public_key_pem.to_string(),
+            alg: Alg(jsonwebtoken::Algorithm::ES256),
+        };
+
+        let mock_db = MockDatabase::new(DatabaseBackend::Postgres)
+            .append_query_results(vec![vec![creds]])
+            .into_connection();
+        let app_state = test_app_state(Some(Arc::new(mock_db))).await;
 
         // Build request parts with Authorization header
         let mut headers = HeaderMap::new();
@@ -236,10 +241,9 @@
     }
 
     #[tokio::test]
-    async fn test_missing_authorization_header() {
         *INIT;
 
-        let app_state = crate::utils::state::setup().await;
+        let app_state = test_app_state(None).await;
         let mut parts = Request::builder()
             .method(Method::GET)
             .uri("/")
@@ -259,10 +263,9 @@
     }
 
     #[tokio::test]
-    async fn test_invalid_authorization_header_format() {
         *INIT;
 
-        let app_state = crate::utils::state::setup().await;
+        let app_state = test_app_state(None).await;
         let mut headers = HeaderMap::new();
         headers.insert(header::AUTHORIZATION, "NotBearer token".parse().unwrap());
         let mut parts = Request::builder()
@@ -284,10 +287,9 @@
     }
 
     #[tokio::test]
-    async fn test_invalid_token_format() {
         *INIT;
 
-        let app_state = crate::utils::state::setup().await;
+        let app_state = test_app_state(None).await;
         let mut headers = HeaderMap::new();
         headers.insert(header::AUTHORIZATION, "Bearer not.a.jwt".parse().unwrap());
         let mut parts = Request::builder()
@@ -309,7 +311,6 @@
     }
 
     #[tokio::test]
-    async fn test_missing_issuer_identifier_in_token() {
         *INIT;
 
         // Create a valid JWT but without kid
@@ -335,7 +336,7 @@
             &EncodingKey::from_ec_pem(&private_key_pem).unwrap(),
         )
         .unwrap();
-        let app_state = crate::utils::state::setup().await;
+        let app_state = test_app_state(None).await;
         let mut headers = HeaderMap::new();
         headers.insert(
             header::AUTHORIZATION,
@@ -356,7 +357,6 @@
     }
 
     #[tokio::test]
-    async fn test_invalid_token_verification() {
         *INIT;
 
         // Create a valid JWT with kid, but not registered in DB
@@ -384,7 +384,13 @@
             &EncodingKey::from_ec_pem(&private_key_pem).unwrap(),
         )
         .unwrap();
-        let app_state = crate::utils::state::setup().await;
+
+        // Set up mock database to return empty result (no issuer found)
+        let mock_db = MockDatabase::new(DatabaseBackend::Postgres)
+            .append_query_results::<Vec<credentials::Model>, _>(vec![vec![]])
+            .into_connection();
+        let app_state = test_app_state(Some(Arc::new(mock_db))).await;
+
         let mut headers = HeaderMap::new();
         headers.insert(
             header::AUTHORIZATION,
@@ -409,7 +415,6 @@
     }
 
     #[tokio::test]
-    async fn test_other_authentication_error() {
         *INIT;
 
         // Simulate a token with a valid kid, but with an unsupported algorithm
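The reworked middleware tests (and the aggregation tests in this patch) share one mocking pattern: SeaORM's `MockDatabase` hands back one prepared result set per executed query, in execution order, so an empty inner `Vec` simulates a missing row. A minimal sketch of that pattern, reusing the `credentials::Model` shape from the hunk above:

```rust
// Sketch only: mirrors the mocking pattern used in the tests above.
use sea_orm::{DatabaseBackend, DatabaseConnection, MockDatabase};

use crate::models::{credentials, Alg};

// One inner Vec per query, consumed in the order the queries run; an empty
// Vec makes that query return no rows (the "unknown issuer" case).
fn mock_credentials_db(issuer: &str, public_key_pem: &str) -> DatabaseConnection {
    let registered = credentials::Model {
        issuer: issuer.to_string(),
        public_key: public_key_pem.to_string(),
        alg: Alg(jsonwebtoken::Algorithm::ES256),
    };

    MockDatabase::new(DatabaseBackend::Postgres)
        .append_query_results(vec![vec![registered], vec![]])
        .into_connection()
}
```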
diff --git a/src/web/mod.rs b/src/web/mod.rs
index 6ad1fdf4..e703c8c8 100644
--- a/src/web/mod.rs
+++ b/src/web/mod.rs
@@ -1,2 +1,3 @@
 pub mod handlers;
 pub mod midlw;
+pub mod status_list_aggregation;
diff --git a/src/web/status_list_aggregation.rs b/src/web/status_list_aggregation.rs
new file mode 100644
index 00000000..0edbf9f6
--- /dev/null
+++ b/src/web/status_list_aggregation.rs
@@ -0,0 +1,222 @@
+use crate::{utils::state::AppState, web::handlers::status_list::error::StatusListError};
+use axum::{
+    extract::{Json, State},
+    http::HeaderMap,
+    response::IntoResponse,
+};
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct StatusListAggregationRequest {
+    pub list_ids: Option<Vec<String>>,
+    pub issuer: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct StatusListAggregationResponse {
+    pub aggregation_uri: String,
+}
+
+pub async fn aggregate_status_lists(
+    State(state): State<Arc<AppState>>,
+    _headers: HeaderMap,
+    Json(request): Json<StatusListAggregationRequest>,
+) -> Result<impl IntoResponse, StatusListError> {
+    let mut status_lists = Vec::new();
+
+    if let Some(list_ids) = request.list_ids {
+        for list_id in list_ids {
+            if let Some(token) = state
+                .status_list_token_repository
+                .find_one_by(list_id.clone())
+                .await
+                .map_err(|_| StatusListError::InternalServerError)?
+            {
+                status_lists.push(token);
+            }
+        }
+    } else if let Some(issuer) = request.issuer {
+        status_lists = state
+            .status_list_token_repository
+            .find_all_by_issuer(&issuer)
+            .await
+            .map_err(|_| StatusListError::InternalServerError)?;
+    }
+
+    if status_lists.is_empty() {
+        return Err(StatusListError::StatusListNotFound);
+    }
+
+    // TODO: Implement actual aggregation logic
+    // For now, just return a placeholder URI
+    Ok(Json(StatusListAggregationResponse {
+        aggregation_uri: format!(
+            "{}/status-list-aggregation/{}",
+            state.server_public_domain,
+            uuid::Uuid::new_v4()
+        ),
+    }))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        model::{status_list_tokens, StatusList, StatusListToken},
+        test_utils::test::test_app_state,
+    };
+    use axum::http::StatusCode;
+    use sea_orm::{DatabaseBackend, MockDatabase};
+    use std::sync::Arc;
+
+    // Helper to create a test request payload
+    fn create_test_request(
+        list_ids: Option<Vec<String>>,
+        issuer: Option<String>,
+    ) -> StatusListAggregationRequest {
+        StatusListAggregationRequest { list_ids, issuer }
+    }
+
+    #[tokio::test]
+    async fn test_aggregate_by_list_ids() {
+        let mock_db = MockDatabase::new(DatabaseBackend::Postgres);
+        let list_id1 = "list1";
+        let list_id2 = "list2";
+        let request =
+            create_test_request(Some(vec![list_id1.to_string(), list_id2.to_string()]), None);
+
+        let status_list1 = StatusListToken {
+            list_id: list_id1.to_string(),
+            issuer: "issuer1".to_string(),
+            exp: None,
+            iat: chrono::Utc::now().timestamp(),
+            status_list: StatusList {
+                bits: 2,
+                lst: "abc".to_string(),
+            },
+            sub: "issuer1".to_string(),
+            ttl: None,
+        };
+
+        let status_list2 = StatusListToken {
+            list_id: list_id2.to_string(),
+            issuer: "issuer2".to_string(),
+            exp: None,
+            iat: chrono::Utc::now().timestamp(),
+            status_list: StatusList {
+                bits: 2,
+                lst: "def".to_string(),
+            },
+            sub: "issuer2".to_string(),
+            ttl: None,
+        };
+
+        let db_conn = Arc::new(
+            mock_db
+                .append_query_results::<Vec<status_list_tokens::Model>, _>(vec![
+                    vec![status_list1.clone()], // find_one_by for list1
+                    vec![status_list2.clone()], // find_one_by for list2
+                ])
+                .into_connection(),
+        );
+
+        let app_state = Arc::new(test_app_state(db_conn));
+
+        let headers = HeaderMap::new();
+        let response = aggregate_status_lists(State(app_state), headers, Json(request))
+            .await
+            .unwrap()
+            .into_response();
+
+        assert_eq!(response.status(), StatusCode::OK);
+
+        // Verify the response contains an aggregation URI
+        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
+            .await
+            .unwrap();
+        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
+        assert!(json.get("aggregation_uri").is_some());
+    }
+
+    #[tokio::test]
+    async fn test_aggregate_by_issuer() {
+        let mock_db = MockDatabase::new(DatabaseBackend::Postgres);
"test_issuer"; + let request = create_test_request(None, Some(issuer.to_string())); + + let status_list1 = StatusListToken { + list_id: "list1".to_string(), + issuer: issuer.to_string(), + exp: None, + iat: chrono::Utc::now().timestamp(), + status_list: StatusList { + bits: 2, + lst: "abc".to_string(), + }, + sub: issuer.to_string(), + ttl: None, + }; + + let status_list2 = StatusListToken { + list_id: "list2".to_string(), + issuer: issuer.to_string(), + exp: None, + iat: chrono::Utc::now().timestamp(), + status_list: StatusList { + bits: 2, + lst: "def".to_string(), + }, + sub: issuer.to_string(), + ttl: None, + }; + + let db_conn = Arc::new( + mock_db + .append_query_results::, _>(vec![ + vec![status_list1.clone(), status_list2.clone()], // find_by_issuer + ]) + .into_connection(), + ); + + let app_state = Arc::new(test_app_state(db_conn)); + + let headers = HeaderMap::new(); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .unwrap() + .into_response(); + + assert_eq!(response.status(), StatusCode::OK); + + // Verify the response contains an aggregation URI + let body = axum::body::to_bytes(response.into_body(), usize::MAX) + .await + .unwrap(); + let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert!(json.get("aggregation_uri").is_some()); + } + + #[tokio::test] + async fn test_aggregate_no_lists_found() { + let mock_db = MockDatabase::new(DatabaseBackend::Postgres); + let request = create_test_request(Some(vec!["nonexistent".to_string()]), None); + + let db_conn = Arc::new( + mock_db + .append_query_results::, _>(vec![ + vec![], // find_one_by returns None + ]) + .into_connection(), + ); + + let app_state = Arc::new(test_app_state(db_conn)); + + let headers = HeaderMap::new(); + let response = aggregate_status_lists(State(app_state), headers, Json(request)) + .await + .into_response(); + + assert_eq!(response.status(), StatusCode::NOT_FOUND); + } +}